// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "diag_9c_ignored", VCPU_STAT(diagnose_9c_ignored) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
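/*
 * Usage sketch (not part of the original source): when kvm is built as a
 * module, these parameters can be set at load time, e.g.
 *
 *	modprobe kvm nested=1 halt_poll_max_steal=20
 *
 * or, for a built-in kvm, via the kernel command line (kvm.nested=1).
 * Note that nested SIE and 1m huge page backing are mutually exclusive
 * here, as the comment on "hpage" above indicates; exact defaults may
 * differ between kernel versions.
 */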

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void)
{
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
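/*
 * Worked example (a sketch, not from the original source): epoch/epdx form
 * a 128-bit signed value, so the 64-bit addition above needs a manual
 * carry/borrow. Assume epoch = 0xffffffffffffffff and a negated delta of 1
 * (positive, so delta_idx stays 0): the addition wraps epoch to 0,
 * "scb->epoch < delta" holds, and epdx is incremented by one - exactly the
 * 128-bit result. For a negative delta, delta_idx = -1 pre-borrows from
 * epdx, and the conditional +1 cancels it whenever the unsigned addition
 * carried, i.e. whenever no real borrow occurred.
 */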

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}
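/*
 * Usage note (a sketch, not from the original source): with bit 0x100 set
 * in r0, PERFORM LOCKED OPERATION only tests whether function code "nr"
 * is installed and reports the answer in the condition code, so e.g.
 * plo_test_bit(0) returns 1 on any machine implementing that function
 * code. The loop in kvm_s390_cpu_feat_init() below relies on this to
 * build the 256-bit PLO subfunction bitmap, one queried bit at a time.
 */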

static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc", "memory");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}
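/*
 * A hypothetical userspace sketch (not part of this file): these
 * capabilities are queried through the generic KVM_CHECK_EXTENSION ioctl;
 * "vm_fd" is assumed to be an open VM file descriptor:
 *
 *	int max_xfer = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
 *	// max_xfer is MEM_OP_MAX_SIZE (65536) when the memory op is supported
 *	int max_vcpus = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
 */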
571
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400572static void kvm_s390_sync_dirty_log(struct kvm *kvm,
Janosch Frank0959e162018-07-17 13:21:22 +0100573 struct kvm_memory_slot *memslot)
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400574{
Janosch Frank0959e162018-07-17 13:21:22 +0100575 int i;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400576 gfn_t cur_gfn, last_gfn;
Janosch Frank0959e162018-07-17 13:21:22 +0100577 unsigned long gaddr, vmaddr;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400578 struct gmap *gmap = kvm->arch.gmap;
Janosch Frank0959e162018-07-17 13:21:22 +0100579 DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400580
Janosch Frank0959e162018-07-17 13:21:22 +0100581 /* Loop over all guest segments */
582 cur_gfn = memslot->base_gfn;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400583 last_gfn = memslot->base_gfn + memslot->npages;
Janosch Frank0959e162018-07-17 13:21:22 +0100584 for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
585 gaddr = gfn_to_gpa(cur_gfn);
586 vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
587 if (kvm_is_error_hva(vmaddr))
588 continue;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400589
Janosch Frank0959e162018-07-17 13:21:22 +0100590 bitmap_zero(bitmap, _PAGE_ENTRIES);
591 gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
592 for (i = 0; i < _PAGE_ENTRIES; i++) {
593 if (test_bit(i, bitmap))
594 mark_page_dirty(kvm, cur_gfn + i);
595 }
596
Christian Borntraeger1763f8d2016-02-03 11:12:34 +0100597 if (fatal_signal_pending(current))
598 return;
Christian Borntraeger70c88a02016-02-02 15:15:56 +0100599 cond_resched();
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400600 }
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400601}
602
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100603/* Section: vm related */
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +0200604static void sca_del_vcpu(struct kvm_vcpu *vcpu);
605
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100606/*
607 * Get (and clear) the dirty memory log for a memory slot.
608 */
609int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
610 struct kvm_dirty_log *log)
611{
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400612 int r;
613 unsigned long n;
Paolo Bonzini9f6b8022015-05-17 16:20:07 +0200614 struct kvm_memslots *slots;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400615 struct kvm_memory_slot *memslot;
616 int is_dirty = 0;
617
Janosch Franke1e8a962017-02-02 16:39:31 +0100618 if (kvm_is_ucontrol(kvm))
619 return -EINVAL;
620
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400621 mutex_lock(&kvm->slots_lock);
622
623 r = -EINVAL;
624 if (log->slot >= KVM_USER_MEM_SLOTS)
625 goto out;
626
Paolo Bonzini9f6b8022015-05-17 16:20:07 +0200627 slots = kvm_memslots(kvm);
628 memslot = id_to_memslot(slots, log->slot);
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400629 r = -ENOENT;
630 if (!memslot->dirty_bitmap)
631 goto out;
632
633 kvm_s390_sync_dirty_log(kvm, memslot);
634 r = kvm_get_dirty_log(kvm, log, &is_dirty);
635 if (r)
636 goto out;
637
638 /* Clear the dirty log */
639 if (is_dirty) {
640 n = kvm_dirty_bitmap_bytes(memslot);
641 memset(memslot->dirty_bitmap, 0, n);
642 }
643 r = 0;
644out:
645 mutex_unlock(&kvm->slots_lock);
646 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100647}
648
David Hildenbrand6502a342016-06-21 14:19:51 +0200649static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
650{
651 unsigned int i;
652 struct kvm_vcpu *vcpu;
653
654 kvm_for_each_vcpu(i, vcpu, kvm) {
655 kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
656 }
657}
658
Paolo Bonzinie5d83c72017-02-16 10:40:56 +0100659int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
Cornelia Huckd938dc52013-10-23 18:26:34 +0200660{
661 int r;
662
663 if (cap->flags)
664 return -EINVAL;
665
666 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200667 case KVM_CAP_S390_IRQCHIP:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200668 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
Cornelia Huck84223592013-07-15 13:36:01 +0200669 kvm->arch.use_irqchip = 1;
670 r = 0;
671 break;
David Hildenbrand2444b352014-10-09 14:10:13 +0200672 case KVM_CAP_S390_USER_SIGP:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200673 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
David Hildenbrand2444b352014-10-09 14:10:13 +0200674 kvm->arch.user_sigp = 1;
675 r = 0;
676 break;
Eric Farman68c55752014-06-09 10:57:26 -0400677 case KVM_CAP_S390_VECTOR_REGISTERS:
David Hildenbrand5967c172015-11-06 12:08:48 +0100678 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +0200679 if (kvm->created_vcpus) {
David Hildenbrand5967c172015-11-06 12:08:48 +0100680 r = -EBUSY;
681 } else if (MACHINE_HAS_VX) {
David Hildenbrandc54f0d62015-12-02 08:53:52 +0100682 set_kvm_facility(kvm->arch.model.fac_mask, 129);
683 set_kvm_facility(kvm->arch.model.fac_list, 129);
Guenther Hutzl2f87d942016-06-03 14:37:17 +0200684 if (test_facility(134)) {
685 set_kvm_facility(kvm->arch.model.fac_mask, 134);
686 set_kvm_facility(kvm->arch.model.fac_list, 134);
687 }
Maxim Samoylov53743aa2016-02-10 10:31:23 +0100688 if (test_facility(135)) {
689 set_kvm_facility(kvm->arch.model.fac_mask, 135);
690 set_kvm_facility(kvm->arch.model.fac_list, 135);
691 }
Christian Borntraeger7832e912018-12-28 09:43:37 +0100692 if (test_facility(148)) {
693 set_kvm_facility(kvm->arch.model.fac_mask, 148);
694 set_kvm_facility(kvm->arch.model.fac_list, 148);
695 }
Christian Borntraegerd5cb6ab2018-12-28 09:45:58 +0100696 if (test_facility(152)) {
697 set_kvm_facility(kvm->arch.model.fac_mask, 152);
698 set_kvm_facility(kvm->arch.model.fac_list, 152);
699 }
Michael Mueller18280d82015-03-16 16:05:41 +0100700 r = 0;
701 } else
702 r = -EINVAL;
David Hildenbrand5967c172015-11-06 12:08:48 +0100703 mutex_unlock(&kvm->lock);
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200704 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
705 r ? "(not available)" : "(success)");
Eric Farman68c55752014-06-09 10:57:26 -0400706 break;
Fan Zhangc6e5f162016-01-07 18:24:29 +0800707 case KVM_CAP_S390_RI:
708 r = -EINVAL;
709 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +0200710 if (kvm->created_vcpus) {
Fan Zhangc6e5f162016-01-07 18:24:29 +0800711 r = -EBUSY;
712 } else if (test_facility(64)) {
David Hildenbrandc54f0d62015-12-02 08:53:52 +0100713 set_kvm_facility(kvm->arch.model.fac_mask, 64);
714 set_kvm_facility(kvm->arch.model.fac_list, 64);
Fan Zhangc6e5f162016-01-07 18:24:29 +0800715 r = 0;
716 }
717 mutex_unlock(&kvm->lock);
718 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
719 r ? "(not available)" : "(success)");
720 break;
Yi Min Zhao47a46932017-03-10 09:29:38 +0100721 case KVM_CAP_S390_AIS:
722 mutex_lock(&kvm->lock);
723 if (kvm->created_vcpus) {
724 r = -EBUSY;
725 } else {
726 set_kvm_facility(kvm->arch.model.fac_mask, 72);
727 set_kvm_facility(kvm->arch.model.fac_list, 72);
Yi Min Zhao47a46932017-03-10 09:29:38 +0100728 r = 0;
729 }
730 mutex_unlock(&kvm->lock);
731 VM_EVENT(kvm, 3, "ENABLE: AIS %s",
732 r ? "(not available)" : "(success)");
733 break;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +0100734 case KVM_CAP_S390_GS:
735 r = -EINVAL;
736 mutex_lock(&kvm->lock);
Christian Borntraeger241e3ec02017-11-16 15:12:52 +0100737 if (kvm->created_vcpus) {
Fan Zhang4e0b1ab2016-11-29 07:17:55 +0100738 r = -EBUSY;
739 } else if (test_facility(133)) {
740 set_kvm_facility(kvm->arch.model.fac_mask, 133);
741 set_kvm_facility(kvm->arch.model.fac_list, 133);
742 r = 0;
743 }
744 mutex_unlock(&kvm->lock);
745 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
746 r ? "(not available)" : "(success)");
747 break;
Janosch Franka4499382018-07-13 11:28:31 +0100748 case KVM_CAP_S390_HPAGE_1M:
749 mutex_lock(&kvm->lock);
750 if (kvm->created_vcpus)
751 r = -EBUSY;
Janosch Frank40ebdb82018-08-01 11:48:28 +0100752 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
Janosch Franka4499382018-07-13 11:28:31 +0100753 r = -EINVAL;
754 else {
755 r = 0;
Janosch Frankdf88f312018-08-30 16:14:18 +0200756 down_write(&kvm->mm->mmap_sem);
Janosch Franka4499382018-07-13 11:28:31 +0100757 kvm->mm->context.allow_gmap_hpage_1m = 1;
Janosch Frankdf88f312018-08-30 16:14:18 +0200758 up_write(&kvm->mm->mmap_sem);
Janosch Franka4499382018-07-13 11:28:31 +0100759 /*
760 * We might have to create fake 4k page
761 * tables. To avoid that the hardware works on
762 * stale PGSTEs, we emulate these instructions.
763 */
764 kvm->arch.use_skf = 0;
765 kvm->arch.use_pfmfi = 0;
766 }
767 mutex_unlock(&kvm->lock);
768 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
769 r ? "(not available)" : "(success)");
770 break;
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100771 case KVM_CAP_S390_USER_STSI:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200772 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100773 kvm->arch.user_stsi = 1;
774 r = 0;
775 break;
David Hildenbrand6502a342016-06-21 14:19:51 +0200776 case KVM_CAP_S390_USER_INSTR0:
777 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
778 kvm->arch.user_instr0 = 1;
779 icpt_operexc_on_all_vcpus(kvm);
780 r = 0;
781 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200782 default:
783 r = -EINVAL;
784 break;
785 }
786 return r;
787}
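/*
 * A hypothetical userspace sketch (not part of this file): VM-wide
 * capabilities such as KVM_CAP_S390_USER_SIGP are enabled through the
 * KVM_ENABLE_CAP ioctl on the VM file descriptor, before any vCPU is
 * created where the checks above require it:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */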

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
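/*
 * A hypothetical userspace sketch (not part of this file): these memory
 * attributes are driven via KVM_SET_DEVICE_ATTR on the VM fd, e.g. to
 * enable CMMA before any vCPU exists:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *	if (ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
 *		perror("KVM_SET_DEVICE_ATTR");
 */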

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}
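/*
 * A hypothetical userspace sketch (not part of this file): migration mode
 * is toggled and queried through the KVM_S390_VM_MIGRATION attribute
 * group, e.g. to start dirty tracking of CMMA state before a live
 * migration and stop it afterwards:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr  = KVM_S390_VM_MIGRATION_START,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);	// start
 *	attr.attr = KVM_S390_VM_MIGRATION_STOP;
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);	// stop
 */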
1083
static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

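/*
 * Read the current host TOD and convert it to the guest view by adding
 * the guest epoch. If the addition wrapped, the guest clock is one
 * epoch ahead of the host, so the epoch index is incremented by one.
 */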
static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
		if (gtod->tod < htod.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

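/*
 * Set the guest CPU model (cpuid, IBC, facility list). The requested
 * IBC value is clamped to what the machine supports: sclp.ibc carries
 * the lowest supported level in its high halfword and the highest
 * unblocked level in its low halfword. Only allowed before the first
 * VCPU has been created.
 */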
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

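/*
 * Set the guest crypto/utility subfunction masks (PLO, PTFF, KM* etc.).
 * Like the other CPU model attributes this is only possible while no
 * VCPUs exist; the accepted masks are logged for debugging.
 */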
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

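/*
 * Report the host machine model: the real cpuid, the host IBC range and
 * both the facility mask applied by KVM and the raw host facility list.
 */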
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

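/*
 * Probe which VM attribute groups/attributes are supported on this
 * host: 0 if the attribute exists, -ENXIO otherwise. Some entries
 * depend on hardware facilities (CMMA, AP instructions).
 */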
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

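/*
 * Read the guest storage keys for a range of guest frames into a
 * userspace buffer. Returns KVM_S390_GET_SKEYS_NONE if the guest does
 * not use storage keys at all.
 */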
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

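/*
 * Write guest storage keys for a range of guest frames. Storage key
 * handling is enabled on first use; a failing set is retried once after
 * faulting the page in writably.
 */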
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current, current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)

/*
 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
 * address falls in a hole. In that case the index of one of the memslots
 * bordering the hole is returned.
 */
static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return slot;

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
	}

	return start;
}

static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}

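/*
 * Find the guest frame number of the next page with a set bit in the
 * CMMA dirty bitmap, starting at cur_gfn and wrapping around after the
 * highest memslot.
 */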
static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
	struct kvm_memory_slot *ms = slots->memslots + slotidx;
	unsigned long ofs = cur_gfn - ms->base_gfn;

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		slotidx--;
		/* If we are above the highest slot, wrap around */
		if (slotidx < 0)
			slotidx = slots->used_slots - 1;

		ms = slots->memslots + slotidx;
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while ((slotidx > 0) && (ofs >= ms->npages)) {
		slotidx--;
		ms = slots->memslots + slotidx;
		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
	}
	return ms->base_gfn + ofs;
}

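/*
 * Harvest dirty CMMA values starting at args->start_gfn: for each dirty
 * page the PGSTE usage bits are stored in the result buffer and the
 * global dirty page counter is decremented. The loop stops at the end
 * of the buffer, at the end of memory, or when the next dirty bit is
 * too far away to be worth encoding in the same block.
 */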
static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* A bulk (non-peek) read is only valid while migration mode is on */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (!kvm->mm->context.uses_cmm) {
		down_write(&kvm->mm->mmap_sem);
		kvm->mm->context.uses_cmm = 1;
		up_write(&kvm->mm->mmap_sem);
	}
out:
	vfree(bits);
	return r;
}

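/*
 * Dispatcher for the VM ioctls: interrupt injection, the device
 * attribute interface, storage key access and the CMMA migration log.
 */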
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}

/*
 * The format of the crypto control block (CRYCB) is specified in the 3 low
 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility is installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed.
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
		return;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

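/*
 * Install new AP matrix masks (adapters, usage domains, control
 * domains) in the CRYCB. All VCPUs are blocked while the masks change
 * and their shadow CRYCBs are rebuilt afterwards via
 * KVM_REQ_VSIE_RESTART.
 */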
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default: /* Cannot happen */
		break;
	}

	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);

void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	memset(&kvm->arch.crypto.crycb->apcb0, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb0));
	memset(&kvm->arch.crypto.crycb->apcb1, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb1));

	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	if (!test_kvm_facility(kvm, 76))
		return;

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

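/*
 * Create and initialize a new VM: allocate the (basic) SCA and the
 * debug feature, compute the initial CPU model and facility lists, set
 * up crypto and floating interrupt state and, unless this is a
 * ucontrol VM, create the guest address space gmap.
 */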
Carsten Ottee08b9632012-01-04 10:25:20 +01002394int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002395{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002396 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002397 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002398 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002399 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002400
Carsten Ottee08b9632012-01-04 10:25:20 +01002401 rc = -EINVAL;
2402#ifdef CONFIG_KVM_S390_UCONTROL
2403 if (type & ~KVM_VM_S390_UCONTROL)
2404 goto out_err;
2405 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2406 goto out_err;
2407#else
2408 if (type)
2409 goto out_err;
2410#endif
2411
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002412 rc = s390_enable_sie();
2413 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002414 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002415
Carsten Otteb2904112011-10-18 12:27:13 +02002416 rc = -ENOMEM;
2417
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002418 if (!sclp.has_64bscao)
2419 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002420 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002421 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002422 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002423 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002424 goto out_err;
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002425 mutex_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002426 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002427 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002428 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002429 kvm->arch.sca = (struct bsca_block *)
2430 ((char *) kvm->arch.sca + sca_offset);
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002431 mutex_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002432
2433 sprintf(debug_name, "kvm-%u", current->pid);
2434
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002435 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002436 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002437 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002438
Michael Mueller19114be2017-05-30 14:26:02 +02002439 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002440 kvm->arch.sie_page2 =
2441 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2442 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002443 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002444
Michael Mueller25c84db2019-01-31 09:52:41 +01002445 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002446 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002447
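	/*
	 * Build the facility bitmaps from the host's STFLE bits:
	 * fac_mask is the upper bound of what may ever be enabled for
	 * this guest, fac_list is what the guest initially sees; both
	 * are restricted to the facilities KVM can handle
	 * (kvm_s390_fac_base, plus kvm_s390_fac_ext for the mask only).
	 */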
2448 for (i = 0; i < kvm_s390_fac_size(); i++) {
2449 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2450 (kvm_s390_fac_base[i] |
2451 kvm_s390_fac_ext[i]);
2452 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2453 kvm_s390_fac_base[i];
2454 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002455 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002456
David Hildenbrand19352222017-08-29 16:31:08 +02002457	/* we are always in czam mode - even on pre-z14 machines */
2458 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2459 set_kvm_facility(kvm->arch.model.fac_list, 138);
2460 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002461 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2462 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002463 if (MACHINE_HAS_TLB_GUEST) {
2464 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2465 set_kvm_facility(kvm->arch.model.fac_list, 147);
2466 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002467
Pierre Morel05f31e32019-05-21 17:34:37 +02002468 if (css_general_characteristics.aiv && test_facility(65))
2469 set_kvm_facility(kvm->arch.model.fac_mask, 65);
2470
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002471 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002472 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002473
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002474 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002475
Fei Li51978392017-02-17 17:06:26 +08002476 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002477 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002478 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2479 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002480 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002481 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002482
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002483 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002484 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002485
Carsten Ottee08b9632012-01-04 10:25:20 +01002486 if (type & KVM_VM_S390_UCONTROL) {
2487 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002488 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002489 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002490 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002491 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002492 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002493 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002494 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002495 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002496 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002497 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002498 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002499 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002500 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002501
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002502 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002503 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002504 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002505 kvm_s390_vsie_init(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002506 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002507 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002508
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002509 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002510out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002511 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002512 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002513 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002514 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002515 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002516}
2517
Christian Borntraegerd329c032008-11-26 14:50:27 +01002518void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2519{
2520 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002521 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002522 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002523 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002524 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002525 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002526
2527 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002528 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002529
Dominik Dingele6db1d62015-05-07 15:41:57 +02002530 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002531 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002532 free_page((unsigned long)(vcpu->arch.sie_block));
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002533
Christian Borntraeger6692cef2008-11-26 14:51:08 +01002534 kvm_vcpu_uninit(vcpu);
Michael Muellerb110fea2013-06-12 13:54:54 +02002535 kmem_cache_free(kvm_vcpu_cache, vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002536}
2537
2538static void kvm_free_vcpus(struct kvm *kvm)
2539{
2540 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002541 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002542
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002543 kvm_for_each_vcpu(i, vcpu, kvm)
2544 kvm_arch_vcpu_destroy(vcpu);
2545
2546 mutex_lock(&kvm->lock);
2547 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2548 kvm->vcpus[i] = NULL;
2549
2550 atomic_set(&kvm->online_vcpus, 0);
2551 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002552}
2553
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002554void kvm_arch_destroy_vm(struct kvm *kvm)
2555{
Christian Borntraegerd329c032008-11-26 14:50:27 +01002556 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002557 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002558 debug_unregister(kvm->arch.dbf);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002559 kvm_s390_gisa_destroy(kvm);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002560 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002561 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002562 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002563 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002564 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002565 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002566 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002567}
2568
2569/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002570static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2571{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002572 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002573 if (!vcpu->arch.gmap)
2574 return -ENOMEM;
2575 vcpu->arch.gmap->private = vcpu->kvm;
2576
2577 return 0;
2578}
2579
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002580static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2581{
David Hildenbranda6940672016-08-08 22:39:32 +02002582 if (!kvm_s390_use_sca_entries())
2583 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002584 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002585 if (vcpu->kvm->arch.use_esca) {
2586 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002587
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002588 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002589 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002590 } else {
2591 struct bsca_block *sca = vcpu->kvm->arch.sca;
2592
2593 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002594 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002595 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002596 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002597}
2598
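/*
 * Publish a vcpu in the SCA: point its SIE block at the SCA origin via
 * scaoh/scaol, store the SIE block address in the per-cpu sda entry and
 * mark the entry valid in the mcn bitmap - for either SCA format.
 */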
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002599static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002600{
David Hildenbranda6940672016-08-08 22:39:32 +02002601 if (!kvm_s390_use_sca_entries()) {
2602 struct bsca_block *sca = vcpu->kvm->arch.sca;
2603
2604 /* we still need the basic sca for the ipte control */
2605 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2606 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002607 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002608 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002609 read_lock(&vcpu->kvm->arch.sca_lock);
2610 if (vcpu->kvm->arch.use_esca) {
2611 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002612
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002613 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002614 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2615 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002616 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002617 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002618 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002619 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002620
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002621 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002622 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2623 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002624 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002625 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002626 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002627}
2628
2629/* Basic SCA to Extended SCA data copy routines */
2630static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2631{
2632 d->sda = s->sda;
2633 d->sigp_ctrl.c = s->sigp_ctrl.c;
2634 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2635}
2636
2637static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2638{
2639 int i;
2640
2641 d->ipte_control = s->ipte_control;
2642 d->mcn[0] = s->mcn;
2643 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2644 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2645}
2646
2647static int sca_switch_to_extended(struct kvm *kvm)
2648{
2649 struct bsca_block *old_sca = kvm->arch.sca;
2650 struct esca_block *new_sca;
2651 struct kvm_vcpu *vcpu;
2652 unsigned int vcpu_idx;
2653 u32 scaol, scaoh;
2654
2655 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2656 if (!new_sca)
2657 return -ENOMEM;
2658
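	/*
	 * The SIE block addresses the SCA as two 32-bit halves
	 * (scaoh/scaol); the low six bits of scaol are masked off,
	 * presumably reserved given the 64-byte alignment of the block.
	 */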
2659 scaoh = (u32)((u64)(new_sca) >> 32);
2660 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2661
2662 kvm_s390_vcpu_block_all(kvm);
2663 write_lock(&kvm->arch.sca_lock);
2664
2665 sca_copy_b_to_e(new_sca, old_sca);
2666
2667 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2668 vcpu->arch.sie_block->scaoh = scaoh;
2669 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002670 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002671 }
2672 kvm->arch.sca = new_sca;
2673 kvm->arch.use_esca = 1;
2674
2675 write_unlock(&kvm->arch.sca_lock);
2676 kvm_s390_vcpu_unblock_all(kvm);
2677
2678 free_page((unsigned long)old_sca);
2679
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002680 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2681 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002682 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002683}
2684
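/*
 * A vcpu id that does not fit into the basic SCA triggers a one-time,
 * lazy switch to the extended SCA under kvm->lock - provided the
 * machine offers both ESCA and the 64-bit SCA origin.
 */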
2685static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2686{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002687 int rc;
2688
David Hildenbranda6940672016-08-08 22:39:32 +02002689 if (!kvm_s390_use_sca_entries()) {
2690 if (id < KVM_MAX_VCPUS)
2691 return true;
2692 return false;
2693 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002694 if (id < KVM_S390_BSCA_CPU_SLOTS)
2695 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002696 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002697 return false;
2698
2699 mutex_lock(&kvm->lock);
2700 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2701 mutex_unlock(&kvm->lock);
2702
2703 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002704}
2705
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002706int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2707{
Dominik Dingel3c038e62013-10-07 17:11:48 +02002708 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2709 kvm_clear_async_pf_completion_queue(vcpu);
Christian Borntraeger59674c12012-01-11 11:20:33 +01002710 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2711 KVM_SYNC_GPRS |
Christian Borntraeger9eed07352012-02-06 10:59:07 +01002712 KVM_SYNC_ACRS |
David Hildenbrandb028ee32014-07-17 10:47:43 +02002713 KVM_SYNC_CRS |
2714 KVM_SYNC_ARCH0 |
2715 KVM_SYNC_PFAULT;
Julius Niedworok75a46152016-08-03 16:39:54 +02002716 kvm_s390_set_prefix(vcpu, 0);
Fan Zhangc6e5f162016-01-07 18:24:29 +08002717 if (test_kvm_facility(vcpu->kvm, 64))
2718 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002719 if (test_kvm_facility(vcpu->kvm, 82))
2720 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002721 if (test_kvm_facility(vcpu->kvm, 133))
2722 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00002723 if (test_kvm_facility(vcpu->kvm, 156))
2724 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
David Hildenbrandf6aa6dc2016-01-15 14:11:46 +01002725 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2726 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2727 */
2728 if (MACHINE_HAS_VX)
Eric Farman68c55752014-06-09 10:57:26 -04002729 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
David Hildenbrand6fd8e672016-01-18 14:46:34 +01002730 else
2731 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
Dominik Dingeldafd0322014-12-02 16:53:21 +01002732
2733 if (kvm_is_ucontrol(vcpu->kvm))
2734 return __kvm_ucontrol_vcpu_init(vcpu);
2735
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002736 return 0;
2737}
2738
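/*
 * Guest CPU timer accounting: while accounting is on, cputm_start holds
 * the TOD value at which the current measuring interval began.  The vcpu
 * thread is the only writer; concurrent readers use cputm_seqcount to
 * fetch a consistent (cputm, cputm_start) pair.
 */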
David Hildenbranddb0758b2016-02-15 09:42:25 +01002739/* needs preemption disabled to protect from TOD sync and vcpu_load/put */
2740static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2741{
2742 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002743 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002744 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002745 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002746}
2747
2748/* needs preemption disabled to protect from TOD sync and vcpu_load/put */
2749static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2750{
2751 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002752 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002753 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2754 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002755 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002756}
2757
2758/* needs preemption disabled to protect from TOD sync and vcpu_load/put */
2759static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2760{
2761 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2762 vcpu->arch.cputm_enabled = true;
2763 __start_cpu_timer_accounting(vcpu);
2764}
2765
2766/* needs preemption disabled to protect from TOD sync and vcpu_load/put */
2767static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2768{
2769 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2770 __stop_cpu_timer_accounting(vcpu);
2771 vcpu->arch.cputm_enabled = false;
2772}
2773
2774static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2775{
2776 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2777 __enable_cpu_timer_accounting(vcpu);
2778 preempt_enable();
2779}
2780
2781static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2782{
2783 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2784 __disable_cpu_timer_accounting(vcpu);
2785 preempt_enable();
2786}
2787
David Hildenbrand4287f242016-02-15 09:40:12 +01002788/* set the cpu timer - may only be called from the VCPU thread itself */
2789void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2790{
David Hildenbranddb0758b2016-02-15 09:42:25 +01002791 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01002792 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002793 if (vcpu->arch.cputm_enabled)
2794 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01002795 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002796 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002797 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01002798}
2799
David Hildenbranddb0758b2016-02-15 09:42:25 +01002800/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01002801__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2802{
David Hildenbrand9c23a132016-02-17 21:53:33 +01002803 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002804 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002805
2806 if (unlikely(!vcpu->arch.cputm_enabled))
2807 return vcpu->arch.sie_block->cputm;
2808
David Hildenbrand9c23a132016-02-17 21:53:33 +01002809 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2810 do {
2811 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2812 /*
2813 * If the writer would ever execute a read in the critical
2814 * section, e.g. in irq context, we have a deadlock.
2815 */
2816 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2817 value = vcpu->arch.sie_block->cputm;
2818 /* if cputm_start is 0, accounting is being started/stopped */
2819 if (likely(vcpu->arch.cputm_start))
2820 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2821 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2822 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002823 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01002824}
2825
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002826void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2827{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002828
David Hildenbrand37d9df92015-03-11 16:47:33 +01002829 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002830 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01002831 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002832 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01002833 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002834}
2835
2836void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2837{
David Hildenbrand01a745a2016-02-12 20:41:56 +01002838 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01002839 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002840 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01002841 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01002842 vcpu->arch.enabled_gmap = gmap_get_enabled();
2843 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002844
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002845}
2846
2847static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2848{
2849	/* this equals initial cpu reset in the PoP, but we don't switch to ESA */
2850 vcpu->arch.sie_block->gpsw.mask = 0UL;
2851 vcpu->arch.sie_block->gpsw.addr = 0UL;
Christian Borntraeger8d26cf72012-01-11 11:19:32 +01002852 kvm_s390_set_prefix(vcpu, 0);
David Hildenbrand4287f242016-02-15 09:40:12 +01002853 kvm_s390_set_cpu_timer(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002854 vcpu->arch.sie_block->ckc = 0UL;
2855 vcpu->arch.sie_block->todpr = 0;
2856 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
David Hildenbrandb9224cd2018-04-30 17:55:24 +02002857 vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
2858 CR0_INTERRUPT_KEY_SUBMASK |
2859 CR0_MEASUREMENT_ALERT_SUBMASK;
2860 vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
2861 CR14_UNUSED_33 |
2862 CR14_EXTERNAL_DAMAGE_SUBMASK;
Christian Borntraeger55680892020-01-31 05:02:00 -05002863 vcpu->run->s.regs.fpc = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002864 vcpu->arch.sie_block->gbea = 1;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002865 vcpu->arch.sie_block->pp = 0;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01002866 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002867 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2868 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002869 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2870 kvm_s390_vcpu_stop(vcpu);
Jens Freimann2ed10cc2014-02-11 13:48:07 +01002871 kvm_s390_clear_local_irqs(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002872}
2873
Dominik Dingel31928aa2014-12-04 15:47:07 +01002874void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002875{
Jason J. Herne72f25022014-11-25 09:46:02 -05002876 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02002877 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002878 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01002879 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02002880 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002881 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02002882 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01002883 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002884 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02002885 }
David Hildenbrand6502a342016-06-21 14:19:51 +02002886 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2887 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01002888 /* make vcpu_load load the right gmap on the first trigger */
2889 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002890}
2891
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002892static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
2893{
2894 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
2895 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
2896 return true;
2897 return false;
2898}
2899
2900static bool kvm_has_pckmo_ecc(struct kvm *kvm)
2901{
2902 /* At least one ECC subfunction must be present */
2903 return kvm_has_pckmo_subfunc(kvm, 32) ||
2904 kvm_has_pckmo_subfunc(kvm, 33) ||
2905 kvm_has_pckmo_subfunc(kvm, 34) ||
2906 kvm_has_pckmo_subfunc(kvm, 40) ||
2907 kvm_has_pckmo_subfunc(kvm, 41);
2908
2909}
2910
Tony Krowiak5102ee82014-06-27 14:46:01 -04002911static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2912{
Tony Krowiake585b242018-09-25 19:16:18 -04002913 /*
2914 * If the AP instructions are not being interpreted and the MSAX3
2915 * facility is not configured for the guest, there is nothing to set up.
2916 */
2917 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04002918 return;
2919
Tony Krowiake585b242018-09-25 19:16:18 -04002920 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02002921 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04002922 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002923 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
Tony Krowiaka374e892014-09-03 10:13:53 +02002924
Tony Krowiake585b242018-09-25 19:16:18 -04002925 if (vcpu->kvm->arch.crypto.apie)
2926 vcpu->arch.sie_block->eca |= ECA_APIE;
2927
2928 /* Set up protected key support */
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002929 if (vcpu->kvm->arch.crypto.aes_kw) {
Tony Krowiaka374e892014-09-03 10:13:53 +02002930 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002931		/* ECC keys are also wrapped with the AES wrapping key */
2932 if (kvm_has_pckmo_ecc(vcpu->kvm))
2933 vcpu->arch.sie_block->ecd |= ECD_ECC;
2934 }
2935
Tony Krowiaka374e892014-09-03 10:13:53 +02002936 if (vcpu->kvm->arch.crypto.dea_kw)
2937 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04002938}
2939
Dominik Dingelb31605c2014-03-25 13:47:11 +01002940void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2941{
2942 free_page(vcpu->arch.sie_block->cbrlo);
2943 vcpu->arch.sie_block->cbrlo = 0;
2944}
2945
2946int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2947{
2948 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2949 if (!vcpu->arch.sie_block->cbrlo)
2950 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01002951 return 0;
2952}
2953
Michael Mueller91520f12015-02-27 14:32:11 +01002954static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2955{
2956 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2957
Michael Mueller91520f12015-02-27 14:32:11 +01002958 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01002959 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002960 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01002961}
2962
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002963int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2964{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002965 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002966
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002967 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2968 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002969 CPUSTAT_STOPPED);
2970
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002971 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002972 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002973 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002974 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002975
Michael Mueller91520f12015-02-27 14:32:11 +01002976 kvm_s390_vcpu_setup_model(vcpu);
2977
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002978 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2979 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002980 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002981 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002982 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002983 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002984 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002985
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002986 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002987 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02002988 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002989 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2990 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02002991 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002992 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02002993 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002994 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002995 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002996 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002997 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002998 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01002999 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003000 vcpu->arch.sie_block->eca |= ECA_VX;
3001 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04003002 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003003 if (test_kvm_facility(vcpu->kvm, 139))
3004 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003005 if (test_kvm_facility(vcpu->kvm, 156))
3006 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02003007 if (vcpu->arch.sie_block->gd) {
3008 vcpu->arch.sie_block->eca |= ECA_AIV;
3009 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3010 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3011 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003012 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3013 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08003014 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05003015
3016 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003017 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05003018 else
3019 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05003020
Dominik Dingele6db1d62015-05-07 15:41:57 +02003021 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01003022 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3023 if (rc)
3024 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003025 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01003026 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02003027 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01003028
Collin Walling67d49d52018-08-31 12:51:19 -04003029 vcpu->arch.sie_block->hpid = HPID_KVM;
3030
Tony Krowiak5102ee82014-06-27 14:46:01 -04003031 kvm_s390_vcpu_crypto_setup(vcpu);
3032
Dominik Dingelb31605c2014-03-25 13:47:11 +01003033 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003034}
3035
3036struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
3037 unsigned int id)
3038{
Carsten Otte4d475552011-10-18 12:27:12 +02003039 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003040 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02003041 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003042
David Hildenbrand42158252015-10-12 12:57:22 +02003043 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02003044 goto out;
3045
3046 rc = -ENOMEM;
3047
Michael Muellerb110fea2013-06-12 13:54:54 +02003048 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003049 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02003050 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003051
QingFeng Haoda72ca42017-06-07 11:41:19 +02003052 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003053 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
3054 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003055 goto out_free_cpu;
3056
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003057 vcpu->arch.sie_block = &sie_page->sie_block;
3058 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3059
David Hildenbrandefed1102015-04-16 12:32:41 +02003060 /* the real guest size will always be smaller than msl */
3061 vcpu->arch.sie_block->mso = 0;
3062 vcpu->arch.sie_block->msl = sclp.hamax;
3063
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003064 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003065 spin_lock_init(&vcpu->arch.local_int.lock);
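	/*
	 * gd holds the GISA (guest interruption state area) origin, if
	 * the VM set one up - see the ECA_AIV handling in
	 * kvm_arch_vcpu_setup(); format-1 is flagged below when
	 * available.
	 */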
Michael Mueller982cff42019-01-31 09:52:38 +01003066 vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02003067 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3068 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003069 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01003070
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003071 rc = kvm_vcpu_init(vcpu, kvm, id);
3072 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003073 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01003074 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003075 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02003076 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003077
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003078 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08003079out_free_sie_block:
3080 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003081out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02003082 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02003083out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003084 return ERR_PTR(rc);
3085}
3086
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003087int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3088{
David Hildenbrand9a022062014-08-05 17:40:47 +02003089 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003090}
3091
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003092bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3093{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003094 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003095}
3096
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003097void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003098{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003099 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003100 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003101}
3102
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003103void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003104{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003105 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003106}
3107
Christian Borntraeger8e236542015-04-09 13:49:04 +02003108static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3109{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003110 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003111 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003112}
3113
David Hildenbrand9ea59722018-09-25 19:16:16 -04003114bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3115{
3116 return atomic_read(&vcpu->arch.sie_block->prog20) &
3117 (PROG_BLOCK_SIE | PROG_REQUEST);
3118}
3119
Christian Borntraeger8e236542015-04-09 13:49:04 +02003120static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3121{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003122 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003123}
3124
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003125/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003126 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003127 * If the CPU is not running (e.g. waiting while idle) the function will
3128 * return immediately. */
3129void exit_sie(struct kvm_vcpu *vcpu)
3130{
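	/*
	 * CPUSTAT_STOP_INT forces an exit from (v)SIE; PROG_IN_SIE stays
	 * set in prog0c for as long as the cpu is still inside SIE, so
	 * busy-wait below until it drops.
	 */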
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003131 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003132 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003133 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3134 cpu_relax();
3135}
3136
Christian Borntraeger8e236542015-04-09 13:49:04 +02003137/* Kick a guest cpu out of SIE to process a request synchronously */
3138void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003139{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003140 kvm_make_request(req, vcpu);
3141 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003142}
3143
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003144static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3145 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003146{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003147 struct kvm *kvm = gmap->private;
3148 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003149 unsigned long prefix;
3150 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003151
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02003152 if (gmap_is_shadow(gmap))
3153 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003154 if (start >= 1UL << 31)
3155 /* We are only interested in prefix pages */
3156 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003157 kvm_for_each_vcpu(i, vcpu, kvm) {
3158 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003159 prefix = kvm_s390_get_prefix(vcpu);
3160 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3161 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3162 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003163 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003164 }
3165 }
3166}
3167
Christian Borntraeger8b905d22019-03-05 05:30:02 -05003168bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3169{
3170 /* do not poll with more than halt_poll_max_steal percent of steal time */
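	/*
	 * avg_steal_timer appears to be in CPU-timer/TOD units (4096 per
	 * microsecond), so TICK_USEC << 12 is one timer tick in the same
	 * units and the quotient is steal time in percent of a tick.
	 */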
3171 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3172 halt_poll_max_steal) {
3173 vcpu->stat.halt_no_poll_steal++;
3174 return true;
3175 }
3176 return false;
3177}
3178
Christoffer Dallb6d33832012-03-08 16:44:24 -05003179int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3180{
3181 /* kvm common code refers to this, but never calls it */
3182 BUG();
3183 return 0;
3184}
3185
Carsten Otte14eebd92012-05-15 14:15:26 +02003186static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3187 struct kvm_one_reg *reg)
3188{
3189 int r = -EINVAL;
3190
3191 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003192 case KVM_REG_S390_TODPR:
3193 r = put_user(vcpu->arch.sie_block->todpr,
3194 (u32 __user *)reg->addr);
3195 break;
3196 case KVM_REG_S390_EPOCHDIFF:
3197 r = put_user(vcpu->arch.sie_block->epoch,
3198 (u64 __user *)reg->addr);
3199 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003200 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003201 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02003202 (u64 __user *)reg->addr);
3203 break;
3204 case KVM_REG_S390_CLOCK_COMP:
3205 r = put_user(vcpu->arch.sie_block->ckc,
3206 (u64 __user *)reg->addr);
3207 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003208 case KVM_REG_S390_PFTOKEN:
3209 r = put_user(vcpu->arch.pfault_token,
3210 (u64 __user *)reg->addr);
3211 break;
3212 case KVM_REG_S390_PFCOMPARE:
3213 r = put_user(vcpu->arch.pfault_compare,
3214 (u64 __user *)reg->addr);
3215 break;
3216 case KVM_REG_S390_PFSELECT:
3217 r = put_user(vcpu->arch.pfault_select,
3218 (u64 __user *)reg->addr);
3219 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003220 case KVM_REG_S390_PP:
3221 r = put_user(vcpu->arch.sie_block->pp,
3222 (u64 __user *)reg->addr);
3223 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003224 case KVM_REG_S390_GBEA:
3225 r = put_user(vcpu->arch.sie_block->gbea,
3226 (u64 __user *)reg->addr);
3227 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003228 default:
3229 break;
3230 }
3231
3232 return r;
3233}
3234
3235static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3236 struct kvm_one_reg *reg)
3237{
3238 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01003239 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02003240
3241 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003242 case KVM_REG_S390_TODPR:
3243 r = get_user(vcpu->arch.sie_block->todpr,
3244 (u32 __user *)reg->addr);
3245 break;
3246 case KVM_REG_S390_EPOCHDIFF:
3247 r = get_user(vcpu->arch.sie_block->epoch,
3248 (u64 __user *)reg->addr);
3249 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003250 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003251 r = get_user(val, (u64 __user *)reg->addr);
3252 if (!r)
3253 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02003254 break;
3255 case KVM_REG_S390_CLOCK_COMP:
3256 r = get_user(vcpu->arch.sie_block->ckc,
3257 (u64 __user *)reg->addr);
3258 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003259 case KVM_REG_S390_PFTOKEN:
3260 r = get_user(vcpu->arch.pfault_token,
3261 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003262 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3263 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02003264 break;
3265 case KVM_REG_S390_PFCOMPARE:
3266 r = get_user(vcpu->arch.pfault_compare,
3267 (u64 __user *)reg->addr);
3268 break;
3269 case KVM_REG_S390_PFSELECT:
3270 r = get_user(vcpu->arch.pfault_select,
3271 (u64 __user *)reg->addr);
3272 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003273 case KVM_REG_S390_PP:
3274 r = get_user(vcpu->arch.sie_block->pp,
3275 (u64 __user *)reg->addr);
3276 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003277 case KVM_REG_S390_GBEA:
3278 r = get_user(vcpu->arch.sie_block->gbea,
3279 (u64 __user *)reg->addr);
3280 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003281 default:
3282 break;
3283 }
3284
3285 return r;
3286}
Christoffer Dallb6d33832012-03-08 16:44:24 -05003287
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003288static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3289{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003290 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003291 return 0;
3292}
3293
3294int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3295{
Christoffer Dall875656f2017-12-04 21:35:27 +01003296 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003297 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Christoffer Dall875656f2017-12-04 21:35:27 +01003298 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003299 return 0;
3300}
3301
3302int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3303{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003304 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003305 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003306 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003307 return 0;
3308}
3309
3310int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3311 struct kvm_sregs *sregs)
3312{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003313 vcpu_load(vcpu);
3314
Christian Borntraeger59674c12012-01-11 11:20:33 +01003315 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003316 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003317
3318 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003319 return 0;
3320}
3321
3322int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3323 struct kvm_sregs *sregs)
3324{
Christoffer Dallbcdec412017-12-04 21:35:28 +01003325 vcpu_load(vcpu);
3326
Christian Borntraeger59674c12012-01-11 11:20:33 +01003327 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003328 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Christoffer Dallbcdec412017-12-04 21:35:28 +01003329
3330 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003331 return 0;
3332}
3333
3334int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3335{
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003336 int ret = 0;
3337
3338 vcpu_load(vcpu);
3339
3340 if (test_fp_ctl(fpu->fpc)) {
3341 ret = -EINVAL;
3342 goto out;
3343 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003344 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003345 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003346 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3347 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003348 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003349 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003350
3351out:
3352 vcpu_put(vcpu);
3353 return ret;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003354}
3355
3356int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3357{
Christoffer Dall13931232017-12-04 21:35:34 +01003358 vcpu_load(vcpu);
3359
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003360 /* make sure we have the latest values */
3361 save_fpu_regs();
3362 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003363 convert_vx_to_fp((freg_t *) fpu->fprs,
3364 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003365 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003366 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003367 fpu->fpc = vcpu->run->s.regs.fpc;
Christoffer Dall13931232017-12-04 21:35:34 +01003368
3369 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003370 return 0;
3371}
3372
3373static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3374{
3375 int rc = 0;
3376
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003377 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003378 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003379 else {
3380 vcpu->run->psw_mask = psw.mask;
3381 vcpu->run->psw_addr = psw.addr;
3382 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003383 return rc;
3384}
3385
3386int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3387 struct kvm_translation *tr)
3388{
3389 return -EINVAL; /* not implemented yet */
3390}
3391
David Hildenbrand27291e22014-01-23 12:26:52 +01003392#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3393 KVM_GUESTDBG_USE_HW_BP | \
3394 KVM_GUESTDBG_ENABLE)
3395
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003396int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3397 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003398{
David Hildenbrand27291e22014-01-23 12:26:52 +01003399 int rc = 0;
3400
Christoffer Dall66b56562017-12-04 21:35:33 +01003401 vcpu_load(vcpu);
3402
David Hildenbrand27291e22014-01-23 12:26:52 +01003403 vcpu->guest_debug = 0;
3404 kvm_s390_clear_bp_data(vcpu);
3405
Christoffer Dall66b56562017-12-04 21:35:33 +01003406 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3407 rc = -EINVAL;
3408 goto out;
3409 }
3410 if (!sclp.has_gpere) {
3411 rc = -EINVAL;
3412 goto out;
3413 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003414
3415 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3416 vcpu->guest_debug = dbg->control;
3417 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003418 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003419
3420 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3421 rc = kvm_s390_import_bp_data(vcpu, dbg);
3422 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003423 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003424 vcpu->arch.guestdbg.last_bp = 0;
3425 }
3426
3427 if (rc) {
3428 vcpu->guest_debug = 0;
3429 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003430 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003431 }
3432
Christoffer Dall66b56562017-12-04 21:35:33 +01003433out:
3434 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003435 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003436}
3437
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003438int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3439 struct kvm_mp_state *mp_state)
3440{
Christoffer Dallfd232562017-12-04 21:35:30 +01003441 int ret;
3442
3443 vcpu_load(vcpu);
3444
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003445 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003446 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3447 KVM_MP_STATE_OPERATING;
3448
3449 vcpu_put(vcpu);
3450 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003451}
3452
3453int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3454 struct kvm_mp_state *mp_state)
3455{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003456 int rc = 0;
3457
Christoffer Dalle83dff52017-12-04 21:35:31 +01003458 vcpu_load(vcpu);
3459
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003460 /* user space knows about this interface - let it control the state */
3461 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3462
3463 switch (mp_state->mp_state) {
3464 case KVM_MP_STATE_STOPPED:
3465 kvm_s390_vcpu_stop(vcpu);
3466 break;
3467 case KVM_MP_STATE_OPERATING:
3468 kvm_s390_vcpu_start(vcpu);
3469 break;
3470 case KVM_MP_STATE_LOAD:
3471 case KVM_MP_STATE_CHECK_STOP:
3472 /* fall through - CHECK_STOP and LOAD are not supported yet */
3473 default:
3474 rc = -ENXIO;
3475 }
3476
Christoffer Dalle83dff52017-12-04 21:35:31 +01003477 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003478 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003479}
3480
David Hildenbrand8ad35752014-03-14 11:00:21 +01003481static bool ibs_enabled(struct kvm_vcpu *vcpu)
3482{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003483 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003484}
3485
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003486static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3487{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003488retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003489 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003490 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003491 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003492 /*
3493 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003494 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003495 * This ensures that the ipte instruction for this request has
3496 * already finished. We might race against a second unmapper that
3497	 * wants to set the blocking bit. Let's just retry the request loop.
3498 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003499 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003500 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003501 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3502 kvm_s390_get_prefix(vcpu),
3503 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003504 if (rc) {
3505 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003506 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003507 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003508 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003509 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003510
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003511 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3512 vcpu->arch.sie_block->ihcpu = 0xffff;
3513 goto retry;
3514 }
3515
David Hildenbrand8ad35752014-03-14 11:00:21 +01003516 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3517 if (!ibs_enabled(vcpu)) {
3518 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003519 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003520 }
3521 goto retry;
3522 }
3523
3524 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3525 if (ibs_enabled(vcpu)) {
3526 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003527 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003528 }
3529 goto retry;
3530 }
3531
David Hildenbrand6502a342016-06-21 14:19:51 +02003532 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3533 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3534 goto retry;
3535 }
3536
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003537 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3538 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003539 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003540 * instruction manually, in order to provide additional
3541 * functionalities needed for live migration.
3542 */
3543 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3544 goto retry;
3545 }
3546
3547 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3548 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003549 * Re-enable CMM virtualization if CMMA is available and
3550 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003551 */
3552 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003553 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003554 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3555 goto retry;
3556 }
3557
David Hildenbrand0759d062014-05-13 16:54:32 +02003558 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003559 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003560 /* we left the vsie handler, nothing to do, just clear the request */
3561 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003562
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003563 return 0;
3564}
3565
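/*
 * Set the guest TOD clock for the whole VM. The epoch (and, if facility
 * 139 is available, the epoch index) is kept as a delta against the host
 * TOD clock; all VCPUs are blocked while the new delta is written into
 * their SIE control blocks.
 */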
void kvm_s390_set_tod_clock(struct kvm *kvm,
			    const struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_vcpu *vcpu;
	struct kvm_s390_tod_clock_ext htod;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	kvm->arch.epoch = gtod->tod - htod.tod;
	kvm->arch.epdx = 0;
	if (test_kvm_facility(kvm, 139)) {
		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
		if (kvm->arch.epoch > gtod->tod)
			kvm->arch.epdx -= 1;
	}

	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		vcpu->arch.sie_block->epdx = kvm->arch.epdx;
	}

	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

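/*
 * Inject the notification interrupts used for async page faults:
 * PFAULT_INIT tells the guest that a fault is being handled
 * asynchronously, PFAULT_DONE signals its completion.
 */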
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly, but we still
	 * want check_async_completion to clean up.
	 */
	return true;
}

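/*
 * Try to set up an async page fault for the current host fault. Returns
 * nonzero when an async pfault was queued; 0 means the guest is not
 * prepared for pfault notifications and the fault has to be resolved
 * synchronously.
 */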
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

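/*
 * Prepare the VCPU for the next SIE entry: handle completed async page
 * faults, pending host machine checks and KVM requests, deliver pending
 * guest interrupts and arm guest debugging before control is handed to
 * SIE.
 */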
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

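/*
 * Triage the SIE exit: reinject host-forwarded machine checks, let the
 * intercept handlers deal with real intercepts and turn host page faults
 * either into async pfaults or into a synchronous fault-in.
 */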
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	struct mcck_volatile_info *mcck_info;
	struct sie_page *sie_page;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (exit_reason == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

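/*
 * The inner run loop: alternate between vcpu_pre_run(), the SIE entry via
 * sie64a() and vcpu_post_run() until a signal, a guest debug event or an
 * error/userspace exit condition terminates the loop.
 */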
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are
	 * protected.
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there must
		 * be no uaccess between guest_enter and guest_exit.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

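/*
 * Transfer the register state that userspace passed in kvm_run into the
 * VCPU: PSW, dirty register sets, lazily enabled features (RI, guarded
 * storage, BPBC) and the host/guest switch of FP/vector and guarded
 * storage context.
 */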
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
	/* SIE will load etoken directly from SDNX and therefore kvm_run */

	kvm_run->kvm_dirty_regs = 0;
}

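/*
 * The counterpart of sync_regs(): copy the current VCPU state back into
 * kvm_run for userspace and restore the host register context.
 */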
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (MACHINE_HAS_GS) {
		__ctl_set_bit(2, 4);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		preempt_disable();
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		preempt_enable();
		if (!vcpu->arch.host_gscb)
			__ctl_clear_bit(2, 4);
		vcpu->arch.host_gscb = NULL;
	}
	/* SIE will save etoken directly into SDNX and therefore kvm_run */
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;

	if (kvm_run->immediate_exit)
		return -EINTR;

	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
		return -EINVAL;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	sync_regs(vcpu, kvm_run);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu, kvm_run);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * it into the save area
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

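/*
 * IBS may only be in effect while exactly one VCPU is running (it speeds
 * up that single VCPU, see the comments in kvm_s390_vcpu_start/stop), so
 * the helpers below toggle it via synchronous VCPU requests.
 */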
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

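/*
 * Back end for the KVM_S390_MEM_OP vcpu ioctl: read or write guest
 * logical memory through a temporary bounce buffer, or only check the
 * access when KVM_S390_MEMOP_F_CHECK_ONLY is set; on a guest access
 * exception the program interrupt can optionally be injected.
 */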
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

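/*
 * Interrupt injection is handled in the "async" ioctl path, which common
 * code calls without taking the vcpu mutex, so that interrupts can be
 * injected into a VCPU that is currently running.
 */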
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks: memory slots have to start and end at a
	 * segment boundary (1 MB), while the backing memory in userland may
	 * be fragmented into several vmas. It is fine to mmap() and munmap()
	 * within this slot at any time after this call.
	 */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}

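/*
 * Mirror the memslot change into the gmap: tear down the old guest
 * mapping on DELETE/MOVE and (re-)create the segment mapping on
 * CREATE/MOVE; FLAGS_ONLY changes need no gmap update.
 */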
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		/* FALLTHROUGH */
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
				      mem->guest_phys_addr, mem->memory_size);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

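/*
 * Compute, for facility-list word i, a mask of facility bits that may be
 * passed through to guests: each 2-bit field of sclp.hmfai determines how
 * many 16-bit chunks of the corresponding word are cut off (the exact
 * hmfai semantics are an assumption here, derived from the shift
 * arithmetic below).
 */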
static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");