// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_ckc", VCPU_STAT(deliver_ckc) },
	{ "deliver_cputm", VCPU_STAT(deliver_cputm) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio", VCPU_STAT(deliver_virtio) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program", VCPU_STAT(deliver_program) },
	{ "deliver_io", VCPU_STAT(deliver_io) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "inject_ckc", VCPU_STAT(inject_ckc) },
	{ "inject_cputm", VCPU_STAT(inject_cputm) },
	{ "inject_external_call", VCPU_STAT(inject_external_call) },
	{ "inject_float_mchk", VM_STAT(inject_float_mchk) },
	{ "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
	{ "inject_io", VM_STAT(inject_io) },
	{ "inject_mchk", VCPU_STAT(inject_mchk) },
	{ "inject_pfault_done", VM_STAT(inject_pfault_done) },
	{ "inject_program", VCPU_STAT(inject_program) },
	{ "inject_restart", VCPU_STAT(inject_restart) },
	{ "inject_service_signal", VM_STAT(inject_service_signal) },
	{ "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
	{ "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
	{ "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
	{ "inject_virtio", VM_STAT(inject_virtio) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "diag_9c_ignored", VCPU_STAT(diagnose_9c_ignored) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void)
{
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

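/*
 * Adjust the guest epoch (and, with the multiple-epoch facility, the
 * epoch index) in a SIE control block after the host TOD clock was
 * stepped by @delta, so the guest-visible TOD value stays unchanged.
 */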
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

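/*
 * Execute the query function (function code 0 in GR0) of the RRF-format
 * instruction given by @opcode and store the returned parameter block
 * at @query.
 */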
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc", "memory");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

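/*
 * Probe the host for PLO and CPACF subfunctions as well as the SIE
 * features usable for nested (vSIE) guests, and record what KVM can
 * offer to guests via the cpu model interface.
 */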
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}

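/*
 * Transfer the dirty state tracked in the gmap segment (pmd) entries of
 * @memslot into the KVM dirty bitmap, one segment of _PAGE_ENTRIES pages
 * at a time.
 */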
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

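/*
 * Request interception of operation exceptions on all vcpus (used for
 * KVM_CAP_S390_USER_INSTR0).
 */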
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			down_write(&kvm->mm->mmap_sem);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			up_write(&kvm->mm->mmap_sem);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

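/*
 * Report the KVM_S390_VM_MEM_LIMIT_SIZE attribute (maximum guest memory
 * size) to user space.
 */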
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

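/*
 * Handle the KVM_S390_VM_CRYPTO attribute group: toggle AES/DEA key
 * wrapping (generating fresh wrapping key masks) and APIE, then
 * propagate the new settings to all vcpus.
 */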
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

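/* Send the synchronous request @req to every vcpu of @kvm. */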
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200976static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
977{
978 int cx;
979 struct kvm_vcpu *vcpu;
980
981 kvm_for_each_vcpu(cx, vcpu, kvm)
982 kvm_s390_sync_request(req, vcpu);
983}
984
985/*
986 * Must be called with kvm->srcu held to avoid races on memslots, and with
Christian Borntraeger1de1ea72017-12-22 10:54:20 +0100987 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200988 */
989static int kvm_s390_vm_start_migration(struct kvm *kvm)
990{
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200991 struct kvm_memory_slot *ms;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200992 struct kvm_memslots *slots;
Claudio Imbrendaafdad612018-04-30 18:33:25 +0200993 unsigned long ram_pages = 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200994 int slotnr;
995
996 /* migration mode already enabled */
Claudio Imbrendaafdad612018-04-30 18:33:25 +0200997 if (kvm->arch.migration_mode)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200998 return 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200999 slots = kvm_memslots(kvm);
1000 if (!slots || !slots->used_slots)
1001 return -EINVAL;
1002
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001003 if (!kvm->arch.use_cmma) {
1004 kvm->arch.migration_mode = 1;
1005 return 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001006 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001007 /* mark all the pages in active slots as dirty */
1008 for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
1009 ms = slots->memslots + slotnr;
Igor Mammedov13a17cc2019-09-11 03:52:18 -04001010 if (!ms->dirty_bitmap)
1011 return -EINVAL;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001012 /*
1013 * The second half of the bitmap is only used on x86,
1014 * and would be wasted otherwise, so we put it to good
1015 * use here to keep track of the state of the storage
1016 * attributes.
1017 */
1018 memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
1019 ram_pages += ms->npages;
1020 }
1021 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1022 kvm->arch.migration_mode = 1;
1023 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001024 return 0;
1025}
1026
1027/*
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001028 * Must be called with kvm->slots_lock to avoid races with ourselves and
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001029 * kvm_s390_vm_start_migration.
1030 */
1031static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1032{
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001033 /* migration mode already disabled */
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001034 if (!kvm->arch.migration_mode)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001035 return 0;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001036 kvm->arch.migration_mode = 0;
1037 if (kvm->arch.use_cmma)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001038 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001039 return 0;
1040}
1041
1042static int kvm_s390_vm_set_migration(struct kvm *kvm,
1043 struct kvm_device_attr *attr)
1044{
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001045 int res = -ENXIO;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001046
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001047 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001048 switch (attr->attr) {
1049 case KVM_S390_VM_MIGRATION_START:
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001050 res = kvm_s390_vm_start_migration(kvm);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001051 break;
1052 case KVM_S390_VM_MIGRATION_STOP:
1053 res = kvm_s390_vm_stop_migration(kvm);
1054 break;
1055 default:
1056 break;
1057 }
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001058 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001059
1060 return res;
1061}
1062
1063static int kvm_s390_vm_get_migration(struct kvm *kvm,
1064 struct kvm_device_attr *attr)
1065{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001066 u64 mig = kvm->arch.migration_mode;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001067
1068 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1069 return -ENXIO;
1070
1071 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1072 return -EFAULT;
1073 return 0;
1074}
1075
Collin L. Walling8fa16962016-07-26 15:29:44 -04001076static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1077{
1078 struct kvm_s390_vm_tod_clock gtod;
1079
1080 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
1081 return -EFAULT;
1082
David Hildenbrand0e7def52018-02-07 12:46:43 +01001083 if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001084 return -EINVAL;
David Hildenbrand0e7def52018-02-07 12:46:43 +01001085 kvm_s390_set_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001086
1087 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
1088 gtod.epoch_idx, gtod.tod);
1089
1090 return 0;
1091}
1092
Jason J. Herne72f25022014-11-25 09:46:02 -05001093static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1094{
1095 u8 gtod_high;
1096
1097 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
1098 sizeof(gtod_high)))
1099 return -EFAULT;
1100
1101 if (gtod_high != 0)
1102 return -EINVAL;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001103 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001104
1105 return 0;
1106}
1107
1108static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1109{
David Hildenbrand0e7def52018-02-07 12:46:43 +01001110 struct kvm_s390_vm_tod_clock gtod = { 0 };
Jason J. Herne72f25022014-11-25 09:46:02 -05001111
David Hildenbrand0e7def52018-02-07 12:46:43 +01001112 if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
1113 sizeof(gtod.tod)))
Jason J. Herne72f25022014-11-25 09:46:02 -05001114 return -EFAULT;
1115
David Hildenbrand0e7def52018-02-07 12:46:43 +01001116 kvm_s390_set_tod_clock(kvm, &gtod);
1117 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001118 return 0;
1119}
1120
1121static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1122{
1123 int ret;
1124
1125 if (attr->flags)
1126 return -EINVAL;
1127
1128 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001129 case KVM_S390_VM_TOD_EXT:
1130 ret = kvm_s390_set_tod_ext(kvm, attr);
1131 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001132 case KVM_S390_VM_TOD_HIGH:
1133 ret = kvm_s390_set_tod_high(kvm, attr);
1134 break;
1135 case KVM_S390_VM_TOD_LOW:
1136 ret = kvm_s390_set_tod_low(kvm, attr);
1137 break;
1138 default:
1139 ret = -ENXIO;
1140 break;
1141 }
1142 return ret;
1143}
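
/*
 * Hypothetical userspace sketch (not part of this file): setting the guest
 * TOD clock including the epoch index via the KVM_S390_VM_TOD_EXT attribute.
 * vm_fd and the chosen clock value are assumptions of the sketch; a non-zero
 * epoch_idx is only accepted when facility 139 is available to the guest.
 *
 *	struct kvm_s390_vm_tod_clock gtod = {
 *		.epoch_idx = 0,
 *		.tod       = 0x1234567890abcdefULL,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_EXT,
 *		.addr  = (__u64)(unsigned long)&gtod,
 *	};
 *
 *	ret = ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */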
1144
David Hildenbrand33d1b272018-04-27 14:36:13 +02001145static void kvm_s390_get_tod_clock(struct kvm *kvm,
1146 struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001147{
1148 struct kvm_s390_tod_clock_ext htod;
1149
1150 preempt_disable();
1151
1152 get_tod_clock_ext((char *)&htod);
1153
1154 gtod->tod = htod.tod + kvm->arch.epoch;
David Hildenbrand33d1b272018-04-27 14:36:13 +02001155 gtod->epoch_idx = 0;
1156 if (test_kvm_facility(kvm, 139)) {
1157 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
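		/* a carry out of the 64-bit addition above must also
		 * propagate into the epoch index */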
1158 if (gtod->tod < htod.tod)
1159 gtod->epoch_idx += 1;
1160 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04001161
1162 preempt_enable();
1163}
1164
1165static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1166{
1167 struct kvm_s390_vm_tod_clock gtod;
1168
1169 memset(&gtod, 0, sizeof(gtod));
David Hildenbrand33d1b272018-04-27 14:36:13 +02001170 kvm_s390_get_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001171 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1172 return -EFAULT;
1173
1174 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1175 gtod.epoch_idx, gtod.tod);
1176 return 0;
1177}
1178
Jason J. Herne72f25022014-11-25 09:46:02 -05001179static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1180{
1181 u8 gtod_high = 0;
1182
1183 if (copy_to_user((void __user *)attr->addr, &gtod_high,
1184 sizeof(gtod_high)))
1185 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001186 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001187
1188 return 0;
1189}
1190
1191static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1192{
David Hildenbrand5a3d8832015-09-29 16:27:24 +02001193 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -05001194
David Hildenbrand60417fc2015-09-29 16:20:36 +02001195 gtod = kvm_s390_get_tod_clock_fast(kvm);
Jason J. Herne72f25022014-11-25 09:46:02 -05001196 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1197 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001198 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001199
1200 return 0;
1201}
1202
1203static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1204{
1205 int ret;
1206
1207 if (attr->flags)
1208 return -EINVAL;
1209
1210 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001211 case KVM_S390_VM_TOD_EXT:
1212 ret = kvm_s390_get_tod_ext(kvm, attr);
1213 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001214 case KVM_S390_VM_TOD_HIGH:
1215 ret = kvm_s390_get_tod_high(kvm, attr);
1216 break;
1217 case KVM_S390_VM_TOD_LOW:
1218 ret = kvm_s390_get_tod_low(kvm, attr);
1219 break;
1220 default:
1221 ret = -ENXIO;
1222 break;
1223 }
1224 return ret;
1225}
1226
Michael Mueller658b6ed2015-02-02 15:49:35 +01001227static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1228{
1229 struct kvm_s390_vm_cpu_processor *proc;
David Hildenbrand053dd232016-04-04 13:59:42 +02001230 u16 lowest_ibc, unblocked_ibc;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001231 int ret = 0;
1232
1233 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +02001234 if (kvm->created_vcpus) {
Michael Mueller658b6ed2015-02-02 15:49:35 +01001235 ret = -EBUSY;
1236 goto out;
1237 }
1238 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1239 if (!proc) {
1240 ret = -ENOMEM;
1241 goto out;
1242 }
1243 if (!copy_from_user(proc, (void __user *)attr->addr,
1244 sizeof(*proc))) {
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001245 kvm->arch.model.cpuid = proc->cpuid;
David Hildenbrand053dd232016-04-04 13:59:42 +02001246 lowest_ibc = sclp.ibc >> 16 & 0xfff;
1247 unblocked_ibc = sclp.ibc & 0xfff;
David Hildenbrand0487c442016-06-10 09:22:31 +02001248 if (lowest_ibc && proc->ibc) {
David Hildenbrand053dd232016-04-04 13:59:42 +02001249 if (proc->ibc > unblocked_ibc)
1250 kvm->arch.model.ibc = unblocked_ibc;
1251 else if (proc->ibc < lowest_ibc)
1252 kvm->arch.model.ibc = lowest_ibc;
1253 else
1254 kvm->arch.model.ibc = proc->ibc;
1255 }
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001256 memcpy(kvm->arch.model.fac_list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +01001257 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001258 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1259 kvm->arch.model.ibc,
1260 kvm->arch.model.cpuid);
1261 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1262 kvm->arch.model.fac_list[0],
1263 kvm->arch.model.fac_list[1],
1264 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001265 } else
1266 ret = -EFAULT;
1267 kfree(proc);
1268out:
1269 mutex_unlock(&kvm->lock);
1270 return ret;
1271}
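
/*
 * Worked example with hypothetical values: for sclp.ibc = 0x01000123 the
 * host reports lowest_ibc = 0x100 and unblocked_ibc = 0x123.  A requested
 * proc->ibc of 0x0f0 is then raised to 0x100, a request of 0x200 is capped
 * to 0x123, and any value in between is taken unchanged.
 */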
1272
David Hildenbrand15c97052015-03-19 17:36:43 +01001273static int kvm_s390_set_processor_feat(struct kvm *kvm,
1274 struct kvm_device_attr *attr)
1275{
1276 struct kvm_s390_vm_cpu_feat data;
David Hildenbrand15c97052015-03-19 17:36:43 +01001277
1278 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1279 return -EFAULT;
1280 if (!bitmap_subset((unsigned long *) data.feat,
1281 kvm_s390_available_cpu_feat,
1282 KVM_S390_VM_CPU_FEAT_NR_BITS))
1283 return -EINVAL;
1284
1285 mutex_lock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001286 if (kvm->created_vcpus) {
1287 mutex_unlock(&kvm->lock);
1288 return -EBUSY;
David Hildenbrand15c97052015-03-19 17:36:43 +01001289 }
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001290 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1291 KVM_S390_VM_CPU_FEAT_NR_BITS);
David Hildenbrand15c97052015-03-19 17:36:43 +01001292 mutex_unlock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001293 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1294 data.feat[0],
1295 data.feat[1],
1296 data.feat[2]);
1297 return 0;
David Hildenbrand15c97052015-03-19 17:36:43 +01001298}
1299
David Hildenbrand0a763c72016-05-18 16:03:47 +02001300static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1301 struct kvm_device_attr *attr)
1302{
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001303 mutex_lock(&kvm->lock);
1304 if (kvm->created_vcpus) {
1305 mutex_unlock(&kvm->lock);
1306 return -EBUSY;
1307 }
1308
1309 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1310 sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1311 mutex_unlock(&kvm->lock);
1312 return -EFAULT;
1313 }
1314 mutex_unlock(&kvm->lock);
1315
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001316 VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1317 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1318 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1319 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1320 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1321 VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1322 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1323 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1324 VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1325 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1326 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1327 VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1328 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1329 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1330 VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
1331 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1332 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1333 VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1334 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1335 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1336 VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1337 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1338 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1339 VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1340 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1341 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1342 VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1343 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1344 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1345 VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1346 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1347 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1348 VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1349 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1350 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1351 VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1352 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1353 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1354 VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1355 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1356 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1357 VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1358 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1359 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001360 VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1361 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1362 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001363 VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1364 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1365 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1366 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1367 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001368 VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1369 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1370 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1371 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1372 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001373
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001374 return 0;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001375}
1376
Michael Mueller658b6ed2015-02-02 15:49:35 +01001377static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1378{
1379 int ret = -ENXIO;
1380
1381 switch (attr->attr) {
1382 case KVM_S390_VM_CPU_PROCESSOR:
1383 ret = kvm_s390_set_processor(kvm, attr);
1384 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001385 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1386 ret = kvm_s390_set_processor_feat(kvm, attr);
1387 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001388 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1389 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1390 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001391 }
1392 return ret;
1393}
1394
1395static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1396{
1397 struct kvm_s390_vm_cpu_processor *proc;
1398 int ret = 0;
1399
1400 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1401 if (!proc) {
1402 ret = -ENOMEM;
1403 goto out;
1404 }
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001405 proc->cpuid = kvm->arch.model.cpuid;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001406 proc->ibc = kvm->arch.model.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001407 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1408 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001409 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1410 kvm->arch.model.ibc,
1411 kvm->arch.model.cpuid);
1412 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1413 kvm->arch.model.fac_list[0],
1414 kvm->arch.model.fac_list[1],
1415 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001416 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1417 ret = -EFAULT;
1418 kfree(proc);
1419out:
1420 return ret;
1421}
1422
1423static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1424{
1425 struct kvm_s390_vm_cpu_machine *mach;
1426 int ret = 0;
1427
1428 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
1429 if (!mach) {
1430 ret = -ENOMEM;
1431 goto out;
1432 }
1433 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001434 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001435 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001436 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001437 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001438 sizeof(S390_lowcore.stfle_fac_list));
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001439 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1440 kvm->arch.model.ibc,
1441 kvm->arch.model.cpuid);
1442 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1443 mach->fac_mask[0],
1444 mach->fac_mask[1],
1445 mach->fac_mask[2]);
1446 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1447 mach->fac_list[0],
1448 mach->fac_list[1],
1449 mach->fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001450 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1451 ret = -EFAULT;
1452 kfree(mach);
1453out:
1454 return ret;
1455}
1456
David Hildenbrand15c97052015-03-19 17:36:43 +01001457static int kvm_s390_get_processor_feat(struct kvm *kvm,
1458 struct kvm_device_attr *attr)
1459{
1460 struct kvm_s390_vm_cpu_feat data;
1461
1462 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1463 KVM_S390_VM_CPU_FEAT_NR_BITS);
1464 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1465 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001466 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1467 data.feat[0],
1468 data.feat[1],
1469 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001470 return 0;
1471}
1472
1473static int kvm_s390_get_machine_feat(struct kvm *kvm,
1474 struct kvm_device_attr *attr)
1475{
1476 struct kvm_s390_vm_cpu_feat data;
1477
1478 bitmap_copy((unsigned long *) data.feat,
1479 kvm_s390_available_cpu_feat,
1480 KVM_S390_VM_CPU_FEAT_NR_BITS);
1481 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1482 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001483 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1484 data.feat[0],
1485 data.feat[1],
1486 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001487 return 0;
1488}
1489
David Hildenbrand0a763c72016-05-18 16:03:47 +02001490static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1491 struct kvm_device_attr *attr)
1492{
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001493 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1494 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1495 return -EFAULT;
1496
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001497 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1498 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1499 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1500 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1501 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1502 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1503 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1504 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1505 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1506 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1507 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1508 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1509 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1510 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1511 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
1512 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1513 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1514 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1515 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1516 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1517 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1518 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1519 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1520 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1521 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1522 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1523 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1524 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1525 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1526 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1527 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1528 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1529 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1530 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1531 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1532 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1533 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1534 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1535 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1536 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1537 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1538 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1539 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1540 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001541 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1542 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1543 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001544 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1545 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1546 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1547 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1548 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001549 VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1550 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1551 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1552 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1553 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001554
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001555 return 0;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001556}
1557
1558static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1559 struct kvm_device_attr *attr)
1560{
1561 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1562 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1563 return -EFAULT;
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001564
1565 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1566 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1567 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1568 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1569 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1570 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
1571 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1572 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1573 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
1574 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1575 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1576 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
1577 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1578 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1579 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
1580 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1581 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1582 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
1583 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1584 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1585 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
1586 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1587 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1588 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
1589 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1590 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1591 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
1592 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1593 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1594 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
1595 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1596 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1597 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
1598 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1599 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1600 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
1601 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1602 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1603 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
1604 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1605 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1606 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
1607 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1608 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001609 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
1610 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1611 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001612 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1613 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1614 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1615 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1616 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001617 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1618 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1619 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1620 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1621 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001622
David Hildenbrand0a763c72016-05-18 16:03:47 +02001623 return 0;
1624}
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001625
Michael Mueller658b6ed2015-02-02 15:49:35 +01001626static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1627{
1628 int ret = -ENXIO;
1629
1630 switch (attr->attr) {
1631 case KVM_S390_VM_CPU_PROCESSOR:
1632 ret = kvm_s390_get_processor(kvm, attr);
1633 break;
1634 case KVM_S390_VM_CPU_MACHINE:
1635 ret = kvm_s390_get_machine(kvm, attr);
1636 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001637 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1638 ret = kvm_s390_get_processor_feat(kvm, attr);
1639 break;
1640 case KVM_S390_VM_CPU_MACHINE_FEAT:
1641 ret = kvm_s390_get_machine_feat(kvm, attr);
1642 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001643 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1644 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1645 break;
1646 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1647 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1648 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001649 }
1650 return ret;
1651}
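
/*
 * Hypothetical userspace sketch (not part of this file): a VMM typically
 * reads the host model via KVM_S390_VM_CPU_MACHINE and, before creating any
 * vcpu, writes a (possibly reduced) model via KVM_S390_VM_CPU_PROCESSOR.
 * vm_fd and the error handling are assumptions of the sketch.
 *
 *	struct kvm_s390_vm_cpu_machine mach;
 *	struct kvm_s390_vm_cpu_processor proc;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_MODEL,
 *		.attr  = KVM_S390_VM_CPU_MACHINE,
 *		.addr  = (__u64)(unsigned long)&mach,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr) == 0) {
 *		proc.cpuid = mach.cpuid;
 *		proc.ibc   = mach.ibc;
 *		memcpy(proc.fac_list, mach.fac_list, sizeof(proc.fac_list));
 *		attr.attr = KVM_S390_VM_CPU_PROCESSOR;
 *		attr.addr = (__u64)(unsigned long)&proc;
 *		ret = ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *	}
 */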
1652
Dominik Dingelf2061652014-04-09 13:13:00 +02001653static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1654{
1655 int ret;
1656
1657 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001658 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001659 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001660 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001661 case KVM_S390_VM_TOD:
1662 ret = kvm_s390_set_tod(kvm, attr);
1663 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001664 case KVM_S390_VM_CPU_MODEL:
1665 ret = kvm_s390_set_cpu_model(kvm, attr);
1666 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001667 case KVM_S390_VM_CRYPTO:
1668 ret = kvm_s390_vm_set_crypto(kvm, attr);
1669 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001670 case KVM_S390_VM_MIGRATION:
1671 ret = kvm_s390_vm_set_migration(kvm, attr);
1672 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001673 default:
1674 ret = -ENXIO;
1675 break;
1676 }
1677
1678 return ret;
1679}
1680
1681static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1682{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001683 int ret;
1684
1685 switch (attr->group) {
1686 case KVM_S390_VM_MEM_CTRL:
1687 ret = kvm_s390_get_mem_control(kvm, attr);
1688 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001689 case KVM_S390_VM_TOD:
1690 ret = kvm_s390_get_tod(kvm, attr);
1691 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001692 case KVM_S390_VM_CPU_MODEL:
1693 ret = kvm_s390_get_cpu_model(kvm, attr);
1694 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001695 case KVM_S390_VM_MIGRATION:
1696 ret = kvm_s390_vm_get_migration(kvm, attr);
1697 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001698 default:
1699 ret = -ENXIO;
1700 break;
1701 }
1702
1703 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +02001704}
1705
1706static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1707{
1708 int ret;
1709
1710 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001711 case KVM_S390_VM_MEM_CTRL:
1712 switch (attr->attr) {
1713 case KVM_S390_VM_MEM_ENABLE_CMMA:
1714 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +01001715 ret = sclp.has_cmma ? 0 : -ENXIO;
1716 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001717 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001718 ret = 0;
1719 break;
1720 default:
1721 ret = -ENXIO;
1722 break;
1723 }
1724 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001725 case KVM_S390_VM_TOD:
1726 switch (attr->attr) {
1727 case KVM_S390_VM_TOD_LOW:
1728 case KVM_S390_VM_TOD_HIGH:
1729 ret = 0;
1730 break;
1731 default:
1732 ret = -ENXIO;
1733 break;
1734 }
1735 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001736 case KVM_S390_VM_CPU_MODEL:
1737 switch (attr->attr) {
1738 case KVM_S390_VM_CPU_PROCESSOR:
1739 case KVM_S390_VM_CPU_MACHINE:
David Hildenbrand15c97052015-03-19 17:36:43 +01001740 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1741 case KVM_S390_VM_CPU_MACHINE_FEAT:
David Hildenbrand0a763c72016-05-18 16:03:47 +02001742 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001743 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001744 ret = 0;
1745 break;
1746 default:
1747 ret = -ENXIO;
1748 break;
1749 }
1750 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001751 case KVM_S390_VM_CRYPTO:
1752 switch (attr->attr) {
1753 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1754 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1755 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1756 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1757 ret = 0;
1758 break;
Tony Krowiak37940fb2018-09-25 19:16:39 -04001759 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1760 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1761 ret = ap_instructions_available() ? 0 : -ENXIO;
1762 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001763 default:
1764 ret = -ENXIO;
1765 break;
1766 }
1767 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001768 case KVM_S390_VM_MIGRATION:
1769 ret = 0;
1770 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001771 default:
1772 ret = -ENXIO;
1773 break;
1774 }
1775
1776 return ret;
1777}
1778
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001779static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1780{
1781 uint8_t *keys;
1782 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001783 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001784
1785 if (args->flags != 0)
1786 return -EINVAL;
1787
1788 /* Is this guest using storage keys? */
Janosch Frank55531b72018-02-15 16:33:47 +01001789 if (!mm_uses_skeys(current->mm))
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001790 return KVM_S390_GET_SKEYS_NONE;
1791
1792 /* Enforce sane limit on memory allocation */
1793 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1794 return -EINVAL;
1795
Michal Hocko752ade62017-05-08 15:57:27 -07001796 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001797 if (!keys)
1798 return -ENOMEM;
1799
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001800 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001801 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001802 for (i = 0; i < args->count; i++) {
1803 hva = gfn_to_hva(kvm, args->start_gfn + i);
1804 if (kvm_is_error_hva(hva)) {
1805 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001806 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001807 }
1808
David Hildenbrand154c8c12016-05-09 11:22:34 +02001809 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1810 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001811 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001812 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001813 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001814 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001815
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001816 if (!r) {
1817 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1818 sizeof(uint8_t) * args->count);
1819 if (r)
1820 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001821 }
1822
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001823 kvfree(keys);
1824 return r;
1825}
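
/*
 * Hypothetical userspace sketch (not part of this file): fetching the storage
 * keys of the first 256 guest frames with the KVM_S390_GET_SKEYS vm ioctl.
 * vm_fd is an assumption of the sketch; a return value of
 * KVM_S390_GET_SKEYS_NONE means the guest does not use storage keys.
 *
 *	uint8_t keys[256];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn     = 0,
 *		.count         = 256,
 *		.skeydata_addr = (__u64)(unsigned long)keys,
 *	};
 *
 *	ret = ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 */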
1826
1827static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1828{
1829 uint8_t *keys;
1830 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001831 int srcu_idx, i, r = 0;
Janosch Frankbd096f62018-07-18 13:40:22 +01001832 bool unlocked;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001833
1834 if (args->flags != 0)
1835 return -EINVAL;
1836
1837 /* Enforce sane limit on memory allocation */
1838 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1839 return -EINVAL;
1840
Michal Hocko752ade62017-05-08 15:57:27 -07001841 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001842 if (!keys)
1843 return -ENOMEM;
1844
1845 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1846 sizeof(uint8_t) * args->count);
1847 if (r) {
1848 r = -EFAULT;
1849 goto out;
1850 }
1851
1852 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001853 r = s390_enable_skey();
1854 if (r)
1855 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001856
Janosch Frankbd096f62018-07-18 13:40:22 +01001857 i = 0;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001858 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001859 srcu_idx = srcu_read_lock(&kvm->srcu);
Janosch Frankbd096f62018-07-18 13:40:22 +01001860 while (i < args->count) {
1861 unlocked = false;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001862 hva = gfn_to_hva(kvm, args->start_gfn + i);
1863 if (kvm_is_error_hva(hva)) {
1864 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001865 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001866 }
1867
1868 /* Lowest order bit is reserved */
1869 if (keys[i] & 0x01) {
1870 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001871 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001872 }
1873
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001874 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Janosch Frankbd096f62018-07-18 13:40:22 +01001875 if (r) {
1876 r = fixup_user_fault(current, current->mm, hva,
1877 FAULT_FLAG_WRITE, &unlocked);
1878 if (r)
1879 break;
1880 }
1881 if (!r)
1882 i++;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001883 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001884 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001885 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001886out:
1887 kvfree(keys);
1888 return r;
1889}
1890
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001891/*
1892 * Base address and length must be sent at the start of each block; therefore
1893 * it's cheaper to send some clean data, as long as it's less than the size of
1894 * two longs.
1895 */
1896#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1897/* for consistency */
1898#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
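
/*
 * On s390x sizeof(void *) is 8, so KVM_S390_MAX_BIT_DISTANCE works out to 16:
 * since every CMMA value is a single byte, up to 16 consecutive clean pages
 * are still transmitted inside the current block instead of paying the two
 * u64 (base address and length) that starting a new block would cost.
 */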
1899
1900/*
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001901 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
1902 * address falls in a hole. In that case the index of one of the memslots
1903 * bordering the hole is returned.
1904 */
1905static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
1906{
1907 int start = 0, end = slots->used_slots;
1908 int slot = atomic_read(&slots->lru_slot);
1909 struct kvm_memory_slot *memslots = slots->memslots;
1910
1911 if (gfn >= memslots[slot].base_gfn &&
1912 gfn < memslots[slot].base_gfn + memslots[slot].npages)
1913 return slot;
1914
1915 while (start < end) {
1916 slot = start + (end - start) / 2;
1917
1918 if (gfn >= memslots[slot].base_gfn)
1919 end = slot;
1920 else
1921 start = slot + 1;
1922 }
1923
1924 if (gfn >= memslots[start].base_gfn &&
1925 gfn < memslots[start].base_gfn + memslots[start].npages) {
1926 atomic_set(&slots->lru_slot, start);
1927 }
1928
1929 return start;
1930}
1931
1932static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1933 u8 *res, unsigned long bufsize)
1934{
1935 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
1936
1937 args->count = 0;
1938 while (args->count < bufsize) {
1939 hva = gfn_to_hva(kvm, cur_gfn);
1940 /*
1941 * We return an error if the first value was invalid, but we
1942 * return successfully if at least one value was copied.
1943 */
1944 if (kvm_is_error_hva(hva))
1945 return args->count ? 0 : -EFAULT;
1946 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1947 pgstev = 0;
1948 res[args->count++] = (pgstev >> 24) & 0x43;
1949 cur_gfn++;
1950 }
1951
1952 return 0;
1953}
1954
1955static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
1956 unsigned long cur_gfn)
1957{
1958 int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
1959 struct kvm_memory_slot *ms = slots->memslots + slotidx;
1960 unsigned long ofs = cur_gfn - ms->base_gfn;
1961
1962 if (ms->base_gfn + ms->npages <= cur_gfn) {
1963 slotidx--;
1964 /* If we are above the highest slot, wrap around */
1965 if (slotidx < 0)
1966 slotidx = slots->used_slots - 1;
1967
1968 ms = slots->memslots + slotidx;
1969 ofs = 0;
1970 }
1971 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
1972 while ((slotidx > 0) && (ofs >= ms->npages)) {
1973 slotidx--;
1974 ms = slots->memslots + slotidx;
1975 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
1976 }
1977 return ms->base_gfn + ofs;
1978}
1979
1980static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1981 u8 *res, unsigned long bufsize)
1982{
1983 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
1984 struct kvm_memslots *slots = kvm_memslots(kvm);
1985 struct kvm_memory_slot *ms;
1986
1987 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
1988 ms = gfn_to_memslot(kvm, cur_gfn);
1989 args->count = 0;
1990 args->start_gfn = cur_gfn;
1991 if (!ms)
1992 return 0;
1993 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
1994 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
1995
1996 while (args->count < bufsize) {
1997 hva = gfn_to_hva(kvm, cur_gfn);
1998 if (kvm_is_error_hva(hva))
1999 return 0;
2000 /* Decrement only if we actually flipped the bit to 0 */
2001 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2002 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2003 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2004 pgstev = 0;
2005 /* Save the value */
2006 res[args->count++] = (pgstev >> 24) & 0x43;
2007 /* If the next bit is too far away, stop. */
2008 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2009 return 0;
2010 /* If we reached the previous "next", find the next one */
2011 if (cur_gfn == next_gfn)
2012 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2013 /* Reached the end of memory or of the buffer, stop */
2014 if ((next_gfn >= mem_end) ||
2015 (next_gfn - args->start_gfn >= bufsize))
2016 return 0;
2017 cur_gfn++;
2018 /* Reached the end of the current memslot, take the next one. */
2019 if (cur_gfn - ms->base_gfn >= ms->npages) {
2020 ms = gfn_to_memslot(kvm, cur_gfn);
2021 if (!ms)
2022 return 0;
2023 }
2024 }
2025 return 0;
2026}
2027
2028/*
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002029 * This function searches for the next page with dirty CMMA attributes, and
2030 * saves the attributes in the buffer up to either the end of the buffer or
2031 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2032 * no trailing clean bytes are saved.
2033 * In case no dirty bits were found, or if CMMA was not enabled or used, the
2034 * output buffer will indicate 0 as length.
2035 */
2036static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2037 struct kvm_s390_cmma_log *args)
2038{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002039 unsigned long bufsize;
2040 int srcu_idx, peek, ret;
2041 u8 *values;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002042
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002043 if (!kvm->arch.use_cmma)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002044 return -ENXIO;
2045 /* Invalid/unsupported flags were specified */
2046 if (args->flags & ~KVM_S390_CMMA_PEEK)
2047 return -EINVAL;
2048 /* Without the peek flag, dirty bits can only be queried in migration mode */
2049 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002050 if (!peek && !kvm->arch.migration_mode)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002051 return -EINVAL;
2052 /* CMMA is disabled or was not used, or the buffer has length zero */
2053 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002054 if (!bufsize || !kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002055 memset(args, 0, sizeof(*args));
2056 return 0;
2057 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002058 /* We are not peeking, and there are no dirty pages */
2059 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2060 memset(args, 0, sizeof(*args));
2061 return 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002062 }
2063
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002064 values = vmalloc(bufsize);
2065 if (!values)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002066 return -ENOMEM;
2067
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002068 down_read(&kvm->mm->mmap_sem);
2069 srcu_idx = srcu_read_lock(&kvm->srcu);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002070 if (peek)
2071 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2072 else
2073 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002074 srcu_read_unlock(&kvm->srcu, srcu_idx);
2075 up_read(&kvm->mm->mmap_sem);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002076
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002077 if (kvm->arch.migration_mode)
2078 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2079 else
2080 args->remaining = 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002081
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002082 if (copy_to_user((void __user *)args->values, values, args->count))
2083 ret = -EFAULT;
2084
2085 vfree(values);
2086 return ret;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002087}
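
/*
 * Hypothetical userspace sketch (not part of this file): draining the dirty
 * CMMA values during migration with the KVM_S390_GET_CMMA_BITS vm ioctl.
 * vm_fd, the buffer size and the transmit() helper are assumptions of the
 * sketch; args.remaining tells the VMM how many dirty pages are still left.
 *
 *	uint8_t values[8192];
 *	struct kvm_s390_cmma_log args = {
 *		.start_gfn = 0,
 *		.flags     = 0,
 *		.values    = (__u64)(unsigned long)values,
 *	};
 *
 *	do {
 *		args.count = sizeof(values);
 *		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &args))
 *			break;
 *		transmit(args.start_gfn, values, args.count);
 *		args.start_gfn += args.count;
 *	} while (args.remaining);
 */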
2088
2089/*
2090 * This function sets the CMMA attributes for the given pages. If the input
2091 * buffer has zero length, no action is taken; otherwise the attributes are
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002092 * set and the mm->context.uses_cmm flag is set.
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002093 */
2094static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2095 const struct kvm_s390_cmma_log *args)
2096{
2097 unsigned long hva, mask, pgstev, i;
2098 uint8_t *bits;
2099 int srcu_idx, r = 0;
2100
2101 mask = args->mask;
2102
2103 if (!kvm->arch.use_cmma)
2104 return -ENXIO;
2105 /* invalid/unsupported flags */
2106 if (args->flags != 0)
2107 return -EINVAL;
2108 /* Enforce sane limit on memory allocation */
2109 if (args->count > KVM_S390_CMMA_SIZE_MAX)
2110 return -EINVAL;
2111 /* Nothing to do */
2112 if (args->count == 0)
2113 return 0;
2114
Kees Cook42bc47b2018-06-12 14:27:11 -07002115 bits = vmalloc(array_size(sizeof(*bits), args->count));
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002116 if (!bits)
2117 return -ENOMEM;
2118
2119 r = copy_from_user(bits, (void __user *)args->values, args->count);
2120 if (r) {
2121 r = -EFAULT;
2122 goto out;
2123 }
2124
2125 down_read(&kvm->mm->mmap_sem);
2126 srcu_idx = srcu_read_lock(&kvm->srcu);
2127 for (i = 0; i < args->count; i++) {
2128 hva = gfn_to_hva(kvm, args->start_gfn + i);
2129 if (kvm_is_error_hva(hva)) {
2130 r = -EFAULT;
2131 break;
2132 }
2133
2134 pgstev = bits[i];
2135 pgstev = pgstev << 24;
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002136 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002137 set_pgste_bits(kvm->mm, hva, mask, pgstev);
2138 }
2139 srcu_read_unlock(&kvm->srcu, srcu_idx);
2140 up_read(&kvm->mm->mmap_sem);
2141
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002142 if (!kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002143 down_write(&kvm->mm->mmap_sem);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002144 kvm->mm->context.uses_cmm = 1;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002145 up_write(&kvm->mm->mmap_sem);
2146 }
2147out:
2148 vfree(bits);
2149 return r;
2150}
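
/*
 * Hypothetical sketch of the receiving side (not part of this file): the
 * values obtained with KVM_S390_GET_CMMA_BITS on the source are replayed on
 * the destination with KVM_S390_SET_CMMA_BITS.  start_gfn, count and values
 * come from the source stream; the kernel restricts the mask to the PGSTE
 * bits it manages, so ~0ULL is a safe choice here.
 *
 *	struct kvm_s390_cmma_log args = {
 *		.start_gfn = start_gfn,
 *		.count     = count,
 *		.flags     = 0,
 *		.mask      = ~0ULL,
 *		.values    = (__u64)(unsigned long)values,
 *	};
 *
 *	ret = ioctl(vm_fd, KVM_S390_SET_CMMA_BITS, &args);
 */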
2151
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002152long kvm_arch_vm_ioctl(struct file *filp,
2153 unsigned int ioctl, unsigned long arg)
2154{
2155 struct kvm *kvm = filp->private_data;
2156 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +02002157 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002158 int r;
2159
2160 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01002161 case KVM_S390_INTERRUPT: {
2162 struct kvm_s390_interrupt s390int;
2163
2164 r = -EFAULT;
2165 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2166 break;
2167 r = kvm_s390_inject_vm(kvm, &s390int);
2168 break;
2169 }
Cornelia Huck84223592013-07-15 13:36:01 +02002170 case KVM_CREATE_IRQCHIP: {
2171 struct kvm_irq_routing_entry routing;
2172
2173 r = -EINVAL;
2174 if (kvm->arch.use_irqchip) {
2175 /* Set up dummy routing. */
2176 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -04002177 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +02002178 }
2179 break;
2180 }
Dominik Dingelf2061652014-04-09 13:13:00 +02002181 case KVM_SET_DEVICE_ATTR: {
2182 r = -EFAULT;
2183 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2184 break;
2185 r = kvm_s390_vm_set_attr(kvm, &attr);
2186 break;
2187 }
2188 case KVM_GET_DEVICE_ATTR: {
2189 r = -EFAULT;
2190 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2191 break;
2192 r = kvm_s390_vm_get_attr(kvm, &attr);
2193 break;
2194 }
2195 case KVM_HAS_DEVICE_ATTR: {
2196 r = -EFAULT;
2197 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2198 break;
2199 r = kvm_s390_vm_has_attr(kvm, &attr);
2200 break;
2201 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -04002202 case KVM_S390_GET_SKEYS: {
2203 struct kvm_s390_skeys args;
2204
2205 r = -EFAULT;
2206 if (copy_from_user(&args, argp,
2207 sizeof(struct kvm_s390_skeys)))
2208 break;
2209 r = kvm_s390_get_skeys(kvm, &args);
2210 break;
2211 }
2212 case KVM_S390_SET_SKEYS: {
2213 struct kvm_s390_skeys args;
2214
2215 r = -EFAULT;
2216 if (copy_from_user(&args, argp,
2217 sizeof(struct kvm_s390_skeys)))
2218 break;
2219 r = kvm_s390_set_skeys(kvm, &args);
2220 break;
2221 }
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002222 case KVM_S390_GET_CMMA_BITS: {
2223 struct kvm_s390_cmma_log args;
2224
2225 r = -EFAULT;
2226 if (copy_from_user(&args, argp, sizeof(args)))
2227 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002228 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002229 r = kvm_s390_get_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002230 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002231 if (!r) {
2232 r = copy_to_user(argp, &args, sizeof(args));
2233 if (r)
2234 r = -EFAULT;
2235 }
2236 break;
2237 }
2238 case KVM_S390_SET_CMMA_BITS: {
2239 struct kvm_s390_cmma_log args;
2240
2241 r = -EFAULT;
2242 if (copy_from_user(&args, argp, sizeof(args)))
2243 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002244 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002245 r = kvm_s390_set_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002246 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002247 break;
2248 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002249 default:
Avi Kivity367e1312009-08-26 14:57:07 +03002250 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002251 }
2252
2253 return r;
2254}
2255
Tony Krowiak45c9b472015-01-13 11:33:26 -05002256static int kvm_s390_apxa_installed(void)
2257{
Tony Krowiake585b242018-09-25 19:16:18 -04002258 struct ap_config_info info;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002259
Tony Krowiake585b242018-09-25 19:16:18 -04002260 if (ap_instructions_available()) {
2261 if (ap_qci(&info) == 0)
2262 return info.apxa;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002263 }
2264
2265 return 0;
2266}
2267
Tony Krowiake585b242018-09-25 19:16:18 -04002268/*
2269 * The format of the crypto control block (CRYCB) is specified in the 3 low
2270 * order bits of the CRYCB designation (CRYCBD) field as follows:
2271 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
2272 * AP extended addressing (APXA) facility are installed.
2273 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
2274 * Format 2: Both the APXA and MSAX3 facilities are installed
2275 * Format 2: Both the APXA and MSAX3 facilities are installed.
Tony Krowiak45c9b472015-01-13 11:33:26 -05002276static void kvm_s390_set_crycb_format(struct kvm *kvm)
2277{
2278 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2279
Tony Krowiake585b242018-09-25 19:16:18 -04002280 /* Clear the CRYCB format bits - i.e., set format 0 by default */
2281 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2282
2283 /* Check whether MSAX3 is installed */
2284 if (!test_kvm_facility(kvm, 76))
2285 return;
2286
Tony Krowiak45c9b472015-01-13 11:33:26 -05002287 if (kvm_s390_apxa_installed())
2288 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2289 else
2290 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2291}
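
/*
 * Resulting CRYCB format, restating the rules above in table form:
 *
 *	MSAX3 (facility 76)	APXA	CRYCB format
 *	absent			-	0
 *	present			absent	1
 *	present			present	2
 */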
2292
Pierre Morel0e237e42018-10-05 10:31:09 +02002293void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
2294 unsigned long *aqm, unsigned long *adm)
2295{
2296 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2297
2298 mutex_lock(&kvm->lock);
2299 kvm_s390_vcpu_block_all(kvm);
2300
2301 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2302 case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
2303 memcpy(crycb->apcb1.apm, apm, 32);
2304 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
2305 apm[0], apm[1], apm[2], apm[3]);
2306 memcpy(crycb->apcb1.aqm, aqm, 32);
2307 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
2308 aqm[0], aqm[1], aqm[2], aqm[3]);
2309 memcpy(crycb->apcb1.adm, adm, 32);
2310 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
2311 adm[0], adm[1], adm[2], adm[3]);
2312 break;
2313 case CRYCB_FORMAT1:
2314 case CRYCB_FORMAT0: /* Fall through - both formats use APCB0 */
2315 memcpy(crycb->apcb0.apm, apm, 8);
2316 memcpy(crycb->apcb0.aqm, aqm, 2);
2317 memcpy(crycb->apcb0.adm, adm, 2);
2318 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
2319 apm[0], *((unsigned short *)aqm),
2320 *((unsigned short *)adm));
2321 break;
2322 default: /* Cannot happen */
2323 break;
2324 }
2325
2326 /* recreate the shadow crycb for each vcpu */
2327 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2328 kvm_s390_vcpu_unblock_all(kvm);
2329 mutex_unlock(&kvm->lock);
2330}
2331EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
2332
Tony Krowiak421045982018-09-25 19:16:25 -04002333void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2334{
2335 mutex_lock(&kvm->lock);
2336 kvm_s390_vcpu_block_all(kvm);
2337
2338 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2339 sizeof(kvm->arch.crypto.crycb->apcb0));
2340 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2341 sizeof(kvm->arch.crypto.crycb->apcb1));
2342
Pierre Morel0e237e42018-10-05 10:31:09 +02002343 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
Pierre Morel6cc571b2018-09-25 19:16:30 -04002344 /* recreate the shadow crycb for each vcpu */
2345 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
Tony Krowiak421045982018-09-25 19:16:25 -04002346 kvm_s390_vcpu_unblock_all(kvm);
2347 mutex_unlock(&kvm->lock);
2348}
2349EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2350
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002351static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002352{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002353 struct cpuid cpuid;
2354
2355 get_cpu_id(&cpuid);
2356 cpuid.version = 0xff;
2357 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002358}
2359
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002360static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002361{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002362 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002363 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002364
Tony Krowiake585b242018-09-25 19:16:18 -04002365 if (!test_kvm_facility(kvm, 76))
2366 return;
2367
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002368 /* Enable AES/DEA protected key functions by default */
2369 kvm->arch.crypto.aes_kw = 1;
2370 kvm->arch.crypto.dea_kw = 1;
2371 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2372 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2373 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2374 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002375}
2376
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002377static void sca_dispose(struct kvm *kvm)
2378{
2379 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002380 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002381 else
2382 free_page((unsigned long)(kvm->arch.sca));
2383 kvm->arch.sca = NULL;
2384}
2385
Carsten Ottee08b9632012-01-04 10:25:20 +01002386int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002387{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002388 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002389 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002390 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002391 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002392
Carsten Ottee08b9632012-01-04 10:25:20 +01002393 rc = -EINVAL;
2394#ifdef CONFIG_KVM_S390_UCONTROL
2395 if (type & ~KVM_VM_S390_UCONTROL)
2396 goto out_err;
2397 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2398 goto out_err;
2399#else
2400 if (type)
2401 goto out_err;
2402#endif
2403
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002404 rc = s390_enable_sie();
2405 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002406 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002407
Carsten Otteb2904112011-10-18 12:27:13 +02002408 rc = -ENOMEM;
2409
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002410 if (!sclp.has_64bscao)
2411 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002412 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002413 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002414 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002415 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002416 goto out_err;
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002417 mutex_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002418 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002419 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002420 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002421 kvm->arch.sca = (struct bsca_block *)
2422 ((char *) kvm->arch.sca + sca_offset);
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002423 mutex_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002424
2425 sprintf(debug_name, "kvm-%u", current->pid);
2426
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002427 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002428 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002429 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002430
Michael Mueller19114be2017-05-30 14:26:02 +02002431 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002432 kvm->arch.sie_page2 =
2433 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2434 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002435 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002436
Michael Mueller25c84db2019-01-31 09:52:41 +01002437 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002438 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002439
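	/*
	 * fac_mask = host facilities limited to base + extended bits (the upper
	 * bound for the CPU model), fac_list = host facilities limited to the
	 * base bits (what a guest is offered by default).
	 */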
2440 for (i = 0; i < kvm_s390_fac_size(); i++) {
2441 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2442 (kvm_s390_fac_base[i] |
2443 kvm_s390_fac_ext[i]);
2444 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2445 kvm_s390_fac_base[i];
2446 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002447 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002448
David Hildenbrand19352222017-08-29 16:31:08 +02002449	/* we are always in czam (configuration-z/Architecture-architectural mode) - even on pre-z14 machines */
2450 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2451 set_kvm_facility(kvm->arch.model.fac_list, 138);
2452 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002453 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2454 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002455 if (MACHINE_HAS_TLB_GUEST) {
2456 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2457 set_kvm_facility(kvm->arch.model.fac_list, 147);
2458 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002459
Pierre Morel05f31e32019-05-21 17:34:37 +02002460 if (css_general_characteristics.aiv && test_facility(65))
2461 set_kvm_facility(kvm->arch.model.fac_mask, 65);
2462
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002463 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002464 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002465
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002466 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002467
Fei Li51978392017-02-17 17:06:26 +08002468 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002469 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002470 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2471 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002472 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002473 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002474
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002475 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002476 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002477
Carsten Ottee08b9632012-01-04 10:25:20 +01002478 if (type & KVM_VM_S390_UCONTROL) {
2479 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002480 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002481 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002482 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002483 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002484 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002485 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002486 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002487 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002488 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002489 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002490 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002491 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002492 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002493
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002494 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002495 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002496 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002497 kvm_s390_vsie_init(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002498 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002499 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002500
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002501 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002502out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002503 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002504 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002505 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002506 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002507 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002508}
2509
Christian Borntraegerd329c032008-11-26 14:50:27 +01002510void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2511{
2512 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002513 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002514 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002515 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002516 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002517 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002518
2519 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002520 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002521
Dominik Dingele6db1d62015-05-07 15:41:57 +02002522 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002523 kvm_s390_vcpu_unsetup_cmma(vcpu);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002524 free_page((unsigned long)(vcpu->arch.sie_block));
Christian Borntraegerd329c032008-11-26 14:50:27 +01002525}
2526
2527static void kvm_free_vcpus(struct kvm *kvm)
2528{
2529 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002530 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002531
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002532 kvm_for_each_vcpu(i, vcpu, kvm)
Sean Christopherson4543bdc2019-12-18 13:55:14 -08002533 kvm_vcpu_destroy(vcpu);
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002534
2535 mutex_lock(&kvm->lock);
2536 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2537 kvm->vcpus[i] = NULL;
2538
2539 atomic_set(&kvm->online_vcpus, 0);
2540 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002541}
2542
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002543void kvm_arch_destroy_vm(struct kvm *kvm)
2544{
Christian Borntraegerd329c032008-11-26 14:50:27 +01002545 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002546 sca_dispose(kvm);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002547 debug_unregister(kvm->arch.dbf);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002548 kvm_s390_gisa_destroy(kvm);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002549 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002550 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002551 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002552 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002553 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002554 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002555 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002556}
2557
2558/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002559static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2560{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002561 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002562 if (!vcpu->arch.gmap)
2563 return -ENOMEM;
2564 vcpu->arch.gmap->private = vcpu->kvm;
2565
2566 return 0;
2567}
2568
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002569static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2570{
David Hildenbranda6940672016-08-08 22:39:32 +02002571 if (!kvm_s390_use_sca_entries())
2572 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002573 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002574 if (vcpu->kvm->arch.use_esca) {
2575 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002576
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002577 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002578 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002579 } else {
2580 struct bsca_block *sca = vcpu->kvm->arch.sca;
2581
2582 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002583 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002584 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002585 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002586}
2587
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002588static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002589{
David Hildenbranda6940672016-08-08 22:39:32 +02002590 if (!kvm_s390_use_sca_entries()) {
2591 struct bsca_block *sca = vcpu->kvm->arch.sca;
2592
2593 /* we still need the basic sca for the ipte control */
2594 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2595 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002596 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002597 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002598 read_lock(&vcpu->kvm->arch.sca_lock);
2599 if (vcpu->kvm->arch.use_esca) {
2600 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002601
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002602 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002603 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
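		/* the low 6 bits are not part of the ESCA origin and are masked off */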
2604 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002605 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002606 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002607 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002608 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002609
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002610 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002611 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2612 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002613 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002614 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002615 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002616}
2617
2618/* Basic SCA to Extended SCA data copy routines */
2619static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2620{
2621 d->sda = s->sda;
2622 d->sigp_ctrl.c = s->sigp_ctrl.c;
2623 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2624}
2625
2626static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2627{
2628 int i;
2629
2630 d->ipte_control = s->ipte_control;
2631 d->mcn[0] = s->mcn;
2632 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2633 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2634}
2635
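/*
 * Replace the basic SCA with an extended SCA. All VCPUs are blocked so that
 * none of them runs SIE while the SCA pointers in their SIE control blocks
 * are rewritten under the sca_lock write lock.
 */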
2636static int sca_switch_to_extended(struct kvm *kvm)
2637{
2638 struct bsca_block *old_sca = kvm->arch.sca;
2639 struct esca_block *new_sca;
2640 struct kvm_vcpu *vcpu;
2641 unsigned int vcpu_idx;
2642 u32 scaol, scaoh;
2643
2644 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2645 if (!new_sca)
2646 return -ENOMEM;
2647
2648 scaoh = (u32)((u64)(new_sca) >> 32);
2649 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2650
2651 kvm_s390_vcpu_block_all(kvm);
2652 write_lock(&kvm->arch.sca_lock);
2653
2654 sca_copy_b_to_e(new_sca, old_sca);
2655
2656 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2657 vcpu->arch.sie_block->scaoh = scaoh;
2658 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002659 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002660 }
2661 kvm->arch.sca = new_sca;
2662 kvm->arch.use_esca = 1;
2663
2664 write_unlock(&kvm->arch.sca_lock);
2665 kvm_s390_vcpu_unblock_all(kvm);
2666
2667 free_page((unsigned long)old_sca);
2668
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002669 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2670 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002671 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002672}
2673
2674static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2675{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002676 int rc;
2677
David Hildenbranda6940672016-08-08 22:39:32 +02002678 if (!kvm_s390_use_sca_entries()) {
2679 if (id < KVM_MAX_VCPUS)
2680 return true;
2681 return false;
2682 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002683 if (id < KVM_S390_BSCA_CPU_SLOTS)
2684 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002685 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002686 return false;
2687
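	/* the id does not fit into a basic SCA - try to switch to the extended SCA */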
2688 mutex_lock(&kvm->lock);
2689 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2690 mutex_unlock(&kvm->lock);
2691
2692 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002693}
2694
David Hildenbranddb0758b2016-02-15 09:42:25 +01002695/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2696static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2697{
2698 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002699 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002700 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002701 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002702}
2703
2704/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2705static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2706{
2707 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002708 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002709 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2710 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002711 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002712}
2713
2714/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2715static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2716{
2717 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2718 vcpu->arch.cputm_enabled = true;
2719 __start_cpu_timer_accounting(vcpu);
2720}
2721
2722/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2723static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2724{
2725 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2726 __stop_cpu_timer_accounting(vcpu);
2727 vcpu->arch.cputm_enabled = false;
2728}
2729
2730static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2731{
2732 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2733 __enable_cpu_timer_accounting(vcpu);
2734 preempt_enable();
2735}
2736
2737static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2738{
2739 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2740 __disable_cpu_timer_accounting(vcpu);
2741 preempt_enable();
2742}
2743
David Hildenbrand4287f242016-02-15 09:40:12 +01002744/* set the cpu timer - may only be called from the VCPU thread itself */
2745void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2746{
David Hildenbranddb0758b2016-02-15 09:42:25 +01002747 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01002748 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002749 if (vcpu->arch.cputm_enabled)
2750 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01002751 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002752 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002753 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01002754}
2755
David Hildenbranddb0758b2016-02-15 09:42:25 +01002756/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01002757__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2758{
David Hildenbrand9c23a132016-02-17 21:53:33 +01002759 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002760 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01002761
2762 if (unlikely(!vcpu->arch.cputm_enabled))
2763 return vcpu->arch.sie_block->cputm;
2764
David Hildenbrand9c23a132016-02-17 21:53:33 +01002765 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2766 do {
2767 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2768 /*
2769 * If the writer would ever execute a read in the critical
2770 * section, e.g. in irq context, we have a deadlock.
2771 */
2772 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2773 value = vcpu->arch.sie_block->cputm;
2774 /* if cputm_start is 0, accounting is being started/stopped */
2775 if (likely(vcpu->arch.cputm_start))
2776 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2777 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2778 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01002779 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01002780}
2781
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002782void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2783{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002784
David Hildenbrand37d9df92015-03-11 16:47:33 +01002785 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002786 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01002787 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002788 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01002789 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002790}
2791
2792void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2793{
David Hildenbrand01a745a2016-02-12 20:41:56 +01002794 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01002795 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01002796 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01002797 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01002798 vcpu->arch.enabled_gmap = gmap_get_enabled();
2799 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02002800
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002801}
2802
Dominik Dingel31928aa2014-12-04 15:47:07 +01002803void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002804{
Jason J. Herne72f25022014-11-25 09:46:02 -05002805 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02002806 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002807 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01002808 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02002809 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05002810 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02002811 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01002812 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002813 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02002814 }
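	/* intercept operation exceptions so STHYI (facility 74) and user-handled instruction 0 can be emulated */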
David Hildenbrand6502a342016-06-21 14:19:51 +02002815 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2816 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01002817 /* make vcpu_load load the right gmap on the first trigger */
2818 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02002819}
2820
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002821static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
2822{
2823 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
2824 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
2825 return true;
2826 return false;
2827}
2828
2829static bool kvm_has_pckmo_ecc(struct kvm *kvm)
2830{
2831 /* At least one ECC subfunction must be present */
2832 return kvm_has_pckmo_subfunc(kvm, 32) ||
2833 kvm_has_pckmo_subfunc(kvm, 33) ||
2834 kvm_has_pckmo_subfunc(kvm, 34) ||
2835 kvm_has_pckmo_subfunc(kvm, 40) ||
2836 kvm_has_pckmo_subfunc(kvm, 41);
2837
2838}
2839
Tony Krowiak5102ee82014-06-27 14:46:01 -04002840static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2841{
Tony Krowiake585b242018-09-25 19:16:18 -04002842 /*
2843 * If the AP instructions are not being interpreted and the MSAX3
2844 * facility is not configured for the guest, there is nothing to set up.
2845 */
2846 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04002847 return;
2848
Tony Krowiake585b242018-09-25 19:16:18 -04002849 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
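	/* clear all crypto controls first; they are re-enabled below as configured */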
Tony Krowiaka374e892014-09-03 10:13:53 +02002850 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04002851 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002852 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
Tony Krowiaka374e892014-09-03 10:13:53 +02002853
Tony Krowiake585b242018-09-25 19:16:18 -04002854 if (vcpu->kvm->arch.crypto.apie)
2855 vcpu->arch.sie_block->eca |= ECA_APIE;
2856
2857 /* Set up protected key support */
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002858 if (vcpu->kvm->arch.crypto.aes_kw) {
Tony Krowiaka374e892014-09-03 10:13:53 +02002859 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04002860 /* ecc is also wrapped with AES key */
2861 if (kvm_has_pckmo_ecc(vcpu->kvm))
2862 vcpu->arch.sie_block->ecd |= ECD_ECC;
2863 }
2864
Tony Krowiaka374e892014-09-03 10:13:53 +02002865 if (vcpu->kvm->arch.crypto.dea_kw)
2866 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04002867}
2868
Dominik Dingelb31605c2014-03-25 13:47:11 +01002869void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2870{
2871 free_page(vcpu->arch.sie_block->cbrlo);
2872 vcpu->arch.sie_block->cbrlo = 0;
2873}
2874
2875int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2876{
2877 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2878 if (!vcpu->arch.sie_block->cbrlo)
2879 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01002880 return 0;
2881}
2882
Michael Mueller91520f12015-02-27 14:32:11 +01002883static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2884{
2885 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2886
Michael Mueller91520f12015-02-27 14:32:11 +01002887 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01002888 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002889 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01002890}
2891
Sean Christophersonff72bb52019-12-18 13:55:20 -08002892static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
2893{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002894 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002895
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002896 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2897 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002898 CPUSTAT_STOPPED);
2899
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002900 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002901 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002902 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002903 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002904
Michael Mueller91520f12015-02-27 14:32:11 +01002905 kvm_s390_vcpu_setup_model(vcpu);
2906
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002907 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2908 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002909 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002910 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002911 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002912 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002913 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002914
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002915 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002916 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02002917 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002918 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2919 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02002920 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002921 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02002922 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002923 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002924 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002925 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002926 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002927 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01002928 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002929 vcpu->arch.sie_block->eca |= ECA_VX;
2930 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04002931 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04002932 if (test_kvm_facility(vcpu->kvm, 139))
2933 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00002934 if (test_kvm_facility(vcpu->kvm, 156))
2935 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002936 if (vcpu->arch.sie_block->gd) {
2937 vcpu->arch.sie_block->eca |= ECA_AIV;
2938 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
2939 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
2940 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002941 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2942 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08002943 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05002944
2945 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01002946 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05002947 else
2948 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05002949
Dominik Dingele6db1d62015-05-07 15:41:57 +02002950 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01002951 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2952 if (rc)
2953 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002954 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01002955 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02002956 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002957
Collin Walling67d49d52018-08-31 12:51:19 -04002958 vcpu->arch.sie_block->hpid = HPID_KVM;
2959
Tony Krowiak5102ee82014-06-27 14:46:01 -04002960 kvm_s390_vcpu_crypto_setup(vcpu);
2961
Dominik Dingelb31605c2014-03-25 13:47:11 +01002962 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002963}
2964
Sean Christopherson897cc382019-12-18 13:55:09 -08002965int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
2966{
2967 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
2968 return -EINVAL;
2969 return 0;
2970}
2971
Sean Christophersone529ef62019-12-18 13:55:15 -08002972int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002973{
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002974 struct sie_page *sie_page;
Sean Christopherson897cc382019-12-18 13:55:09 -08002975 int rc;
Carsten Otte4d475552011-10-18 12:27:12 +02002976
QingFeng Haoda72ca42017-06-07 11:41:19 +02002977 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002978 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
2979 if (!sie_page)
Sean Christophersone529ef62019-12-18 13:55:15 -08002980 return -ENOMEM;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002981
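	/* the sie_page holds both the SIE control block and the itdb used on TX interception */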
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002982 vcpu->arch.sie_block = &sie_page->sie_block;
2983 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2984
David Hildenbrandefed1102015-04-16 12:32:41 +02002985 /* the real guest size will always be smaller than msl */
2986 vcpu->arch.sie_block->mso = 0;
2987 vcpu->arch.sie_block->msl = sclp.hamax;
2988
Sean Christophersone529ef62019-12-18 13:55:15 -08002989 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002990 spin_lock_init(&vcpu->arch.local_int.lock);
Sean Christophersone529ef62019-12-18 13:55:15 -08002991 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02002992 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
2993 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002994 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002995
Sean Christopherson321f8ee2019-12-18 13:55:10 -08002996 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2997 kvm_clear_async_pf_completion_queue(vcpu);
2998 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2999 KVM_SYNC_GPRS |
3000 KVM_SYNC_ACRS |
3001 KVM_SYNC_CRS |
3002 KVM_SYNC_ARCH0 |
3003 KVM_SYNC_PFAULT;
3004 kvm_s390_set_prefix(vcpu, 0);
3005 if (test_kvm_facility(vcpu->kvm, 64))
3006 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3007 if (test_kvm_facility(vcpu->kvm, 82))
3008 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3009 if (test_kvm_facility(vcpu->kvm, 133))
3010 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3011 if (test_kvm_facility(vcpu->kvm, 156))
3012 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3013 /* fprs can be synchronized via vrs, even if the guest has no vx. With
3014 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3015 */
3016 if (MACHINE_HAS_VX)
3017 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3018 else
3019 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3020
3021 if (kvm_is_ucontrol(vcpu->kvm)) {
3022 rc = __kvm_ucontrol_vcpu_init(vcpu);
3023 if (rc)
Sean Christophersona2017f12019-12-18 13:55:11 -08003024 goto out_free_sie_block;
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003025 }
3026
Sean Christophersone529ef62019-12-18 13:55:15 -08003027 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3028 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3029 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003030
Sean Christophersonff72bb52019-12-18 13:55:20 -08003031 rc = kvm_s390_vcpu_setup(vcpu);
3032 if (rc)
3033 goto out_ucontrol_uninit;
Sean Christophersone529ef62019-12-18 13:55:15 -08003034 return 0;
3035
Sean Christophersonff72bb52019-12-18 13:55:20 -08003036out_ucontrol_uninit:
3037 if (kvm_is_ucontrol(vcpu->kvm))
3038 gmap_remove(vcpu->arch.gmap);
Wei Yongjun7b06bf22010-03-09 14:37:53 +08003039out_free_sie_block:
3040 free_page((unsigned long)(vcpu->arch.sie_block));
Sean Christophersone529ef62019-12-18 13:55:15 -08003041 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003042}
3043
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003044int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3045{
David Hildenbrand9a022062014-08-05 17:40:47 +02003046 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003047}
3048
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003049bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3050{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003051 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003052}
3053
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003054void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003055{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003056 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003057 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003058}
3059
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003060void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003061{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003062 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003063}
3064
Christian Borntraeger8e236542015-04-09 13:49:04 +02003065static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3066{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003067 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003068 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003069}
3070
David Hildenbrand9ea59722018-09-25 19:16:16 -04003071bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3072{
3073 return atomic_read(&vcpu->arch.sie_block->prog20) &
3074 (PROG_BLOCK_SIE | PROG_REQUEST);
3075}
3076
Christian Borntraeger8e236542015-04-09 13:49:04 +02003077static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3078{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003079 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003080}
3081
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003082/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003083 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003084 * If the CPU is not running (e.g. waiting as idle) the function will
3085 * return immediately. */
3086void exit_sie(struct kvm_vcpu *vcpu)
3087{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003088 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003089 kvm_s390_vsie_kick(vcpu);
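	/* busy-wait until the CPU has actually left SIE (PROG_IN_SIE cleared) */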
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003090 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3091 cpu_relax();
3092}
3093
Christian Borntraeger8e236542015-04-09 13:49:04 +02003094/* Kick a guest cpu out of SIE to process a request synchronously */
3095void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003096{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003097 kvm_make_request(req, vcpu);
3098 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003099}
3100
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003101static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3102 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003103{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003104 struct kvm *kvm = gmap->private;
3105 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003106 unsigned long prefix;
3107 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003108
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02003109 if (gmap_is_shadow(gmap))
3110 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003111 if (start >= 1UL << 31)
	3112		/* We are only interested in prefix pages, which always lie below 2 GB */
3113 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003114 kvm_for_each_vcpu(i, vcpu, kvm) {
3115 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003116 prefix = kvm_s390_get_prefix(vcpu);
3117 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3118 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3119 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003120 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003121 }
3122 }
3123}
3124
Christian Borntraeger8b905d22019-03-05 05:30:02 -05003125bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3126{
3127 /* do not poll with more than halt_poll_max_steal percent of steal time */
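	/*
	 * avg_steal_timer is in TOD/CPU-timer format (1 us == 4096 units), so
	 * TICK_USEC << 12 is one timer tick expressed in the same units.
	 */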
3128 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3129 halt_poll_max_steal) {
3130 vcpu->stat.halt_no_poll_steal++;
3131 return true;
3132 }
3133 return false;
3134}
3135
Christoffer Dallb6d33832012-03-08 16:44:24 -05003136int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3137{
3138 /* kvm common code refers to this, but never calls it */
3139 BUG();
3140 return 0;
3141}
3142
Carsten Otte14eebd92012-05-15 14:15:26 +02003143static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3144 struct kvm_one_reg *reg)
3145{
3146 int r = -EINVAL;
3147
3148 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003149 case KVM_REG_S390_TODPR:
3150 r = put_user(vcpu->arch.sie_block->todpr,
3151 (u32 __user *)reg->addr);
3152 break;
3153 case KVM_REG_S390_EPOCHDIFF:
3154 r = put_user(vcpu->arch.sie_block->epoch,
3155 (u64 __user *)reg->addr);
3156 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003157 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003158 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02003159 (u64 __user *)reg->addr);
3160 break;
3161 case KVM_REG_S390_CLOCK_COMP:
3162 r = put_user(vcpu->arch.sie_block->ckc,
3163 (u64 __user *)reg->addr);
3164 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003165 case KVM_REG_S390_PFTOKEN:
3166 r = put_user(vcpu->arch.pfault_token,
3167 (u64 __user *)reg->addr);
3168 break;
3169 case KVM_REG_S390_PFCOMPARE:
3170 r = put_user(vcpu->arch.pfault_compare,
3171 (u64 __user *)reg->addr);
3172 break;
3173 case KVM_REG_S390_PFSELECT:
3174 r = put_user(vcpu->arch.pfault_select,
3175 (u64 __user *)reg->addr);
3176 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003177 case KVM_REG_S390_PP:
3178 r = put_user(vcpu->arch.sie_block->pp,
3179 (u64 __user *)reg->addr);
3180 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003181 case KVM_REG_S390_GBEA:
3182 r = put_user(vcpu->arch.sie_block->gbea,
3183 (u64 __user *)reg->addr);
3184 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003185 default:
3186 break;
3187 }
3188
3189 return r;
3190}
3191
3192static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3193 struct kvm_one_reg *reg)
3194{
3195 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01003196 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02003197
3198 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003199 case KVM_REG_S390_TODPR:
3200 r = get_user(vcpu->arch.sie_block->todpr,
3201 (u32 __user *)reg->addr);
3202 break;
3203 case KVM_REG_S390_EPOCHDIFF:
3204 r = get_user(vcpu->arch.sie_block->epoch,
3205 (u64 __user *)reg->addr);
3206 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003207 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003208 r = get_user(val, (u64 __user *)reg->addr);
3209 if (!r)
3210 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02003211 break;
3212 case KVM_REG_S390_CLOCK_COMP:
3213 r = get_user(vcpu->arch.sie_block->ckc,
3214 (u64 __user *)reg->addr);
3215 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003216 case KVM_REG_S390_PFTOKEN:
3217 r = get_user(vcpu->arch.pfault_token,
3218 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003219 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3220 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02003221 break;
3222 case KVM_REG_S390_PFCOMPARE:
3223 r = get_user(vcpu->arch.pfault_compare,
3224 (u64 __user *)reg->addr);
3225 break;
3226 case KVM_REG_S390_PFSELECT:
3227 r = get_user(vcpu->arch.pfault_select,
3228 (u64 __user *)reg->addr);
3229 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003230 case KVM_REG_S390_PP:
3231 r = get_user(vcpu->arch.sie_block->pp,
3232 (u64 __user *)reg->addr);
3233 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003234 case KVM_REG_S390_GBEA:
3235 r = get_user(vcpu->arch.sie_block->gbea,
3236 (u64 __user *)reg->addr);
3237 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003238 default:
3239 break;
3240 }
3241
3242 return r;
3243}
Christoffer Dallb6d33832012-03-08 16:44:24 -05003244
Janosch Frank7de3f142020-01-31 05:02:02 -05003245static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003246{
Janosch Frank7de3f142020-01-31 05:02:02 -05003247 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
3248 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3249 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
3250
3251 kvm_clear_async_pf_completion_queue(vcpu);
3252 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
3253 kvm_s390_vcpu_stop(vcpu);
3254 kvm_s390_clear_local_irqs(vcpu);
3255}
3256
3257static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3258{
3259 /* Initial reset is a superset of the normal reset */
3260 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
3261
	3262	/* this equals the initial cpu reset in the POP (Principles of Operation), but we don't switch to ESA */
3263 vcpu->arch.sie_block->gpsw.mask = 0;
3264 vcpu->arch.sie_block->gpsw.addr = 0;
3265 kvm_s390_set_prefix(vcpu, 0);
3266 kvm_s390_set_cpu_timer(vcpu, 0);
3267 vcpu->arch.sie_block->ckc = 0;
3268 vcpu->arch.sie_block->todpr = 0;
3269 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
3270 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
3271 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
3272 vcpu->run->s.regs.fpc = 0;
3273 vcpu->arch.sie_block->gbea = 1;
3274 vcpu->arch.sie_block->pp = 0;
3275 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3276}
3277
3278static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
3279{
3280 struct kvm_sync_regs *regs = &vcpu->run->s.regs;
3281
3282 /* Clear reset is a superset of the initial reset */
3283 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3284
3285 memset(&regs->gprs, 0, sizeof(regs->gprs));
3286 memset(&regs->vrs, 0, sizeof(regs->vrs));
3287 memset(&regs->acrs, 0, sizeof(regs->acrs));
3288 memset(&regs->gscb, 0, sizeof(regs->gscb));
3289
3290 regs->etoken = 0;
3291 regs->etoken_extension = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003292}
3293
3294int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3295{
Christoffer Dall875656f2017-12-04 21:35:27 +01003296 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003297 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Christoffer Dall875656f2017-12-04 21:35:27 +01003298 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003299 return 0;
3300}
3301
3302int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3303{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003304 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003305 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003306 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003307 return 0;
3308}
3309
3310int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3311 struct kvm_sregs *sregs)
3312{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003313 vcpu_load(vcpu);
3314
Christian Borntraeger59674c12012-01-11 11:20:33 +01003315 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003316 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003317
3318 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003319 return 0;
3320}
3321
3322int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3323 struct kvm_sregs *sregs)
3324{
Christoffer Dallbcdec412017-12-04 21:35:28 +01003325 vcpu_load(vcpu);
3326
Christian Borntraeger59674c12012-01-11 11:20:33 +01003327 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003328 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Christoffer Dallbcdec412017-12-04 21:35:28 +01003329
3330 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003331 return 0;
3332}
3333
3334int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3335{
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003336 int ret = 0;
3337
3338 vcpu_load(vcpu);
3339
3340 if (test_fp_ctl(fpu->fpc)) {
3341 ret = -EINVAL;
3342 goto out;
3343 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003344 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003345 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003346 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3347 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003348 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003349 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003350
3351out:
3352 vcpu_put(vcpu);
3353 return ret;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003354}
3355
3356int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3357{
Christoffer Dall13931232017-12-04 21:35:34 +01003358 vcpu_load(vcpu);
3359
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003360 /* make sure we have the latest values */
3361 save_fpu_regs();
3362 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003363 convert_vx_to_fp((freg_t *) fpu->fprs,
3364 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003365 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003366 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003367 fpu->fpc = vcpu->run->s.regs.fpc;
Christoffer Dall13931232017-12-04 21:35:34 +01003368
3369 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003370 return 0;
3371}
3372
3373static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3374{
3375 int rc = 0;
3376
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003377 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003378 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003379 else {
3380 vcpu->run->psw_mask = psw.mask;
3381 vcpu->run->psw_addr = psw.addr;
3382 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003383 return rc;
3384}
3385
3386int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3387 struct kvm_translation *tr)
3388{
3389 return -EINVAL; /* not implemented yet */
3390}
3391
David Hildenbrand27291e22014-01-23 12:26:52 +01003392#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3393 KVM_GUESTDBG_USE_HW_BP | \
3394 KVM_GUESTDBG_ENABLE)
3395
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003396int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3397 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003398{
David Hildenbrand27291e22014-01-23 12:26:52 +01003399 int rc = 0;
3400
Christoffer Dall66b56562017-12-04 21:35:33 +01003401 vcpu_load(vcpu);
3402
David Hildenbrand27291e22014-01-23 12:26:52 +01003403 vcpu->guest_debug = 0;
3404 kvm_s390_clear_bp_data(vcpu);
3405
Christoffer Dall66b56562017-12-04 21:35:33 +01003406 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3407 rc = -EINVAL;
3408 goto out;
3409 }
3410 if (!sclp.has_gpere) {
3411 rc = -EINVAL;
3412 goto out;
3413 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003414
3415 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3416 vcpu->guest_debug = dbg->control;
3417 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003418 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003419
3420 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3421 rc = kvm_s390_import_bp_data(vcpu, dbg);
3422 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003423 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003424 vcpu->arch.guestdbg.last_bp = 0;
3425 }
3426
3427 if (rc) {
3428 vcpu->guest_debug = 0;
3429 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003430 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003431 }
3432
Christoffer Dall66b56562017-12-04 21:35:33 +01003433out:
3434 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003435 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003436}
3437
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003438int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3439 struct kvm_mp_state *mp_state)
3440{
Christoffer Dallfd232562017-12-04 21:35:30 +01003441 int ret;
3442
3443 vcpu_load(vcpu);
3444
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003445 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003446 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3447 KVM_MP_STATE_OPERATING;
3448
3449 vcpu_put(vcpu);
3450 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003451}
3452
3453int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3454 struct kvm_mp_state *mp_state)
3455{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003456 int rc = 0;
3457
Christoffer Dalle83dff52017-12-04 21:35:31 +01003458 vcpu_load(vcpu);
3459
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003460 /* user space knows about this interface - let it control the state */
3461 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3462
3463 switch (mp_state->mp_state) {
3464 case KVM_MP_STATE_STOPPED:
3465 kvm_s390_vcpu_stop(vcpu);
3466 break;
3467 case KVM_MP_STATE_OPERATING:
3468 kvm_s390_vcpu_start(vcpu);
3469 break;
3470 case KVM_MP_STATE_LOAD:
3471 case KVM_MP_STATE_CHECK_STOP:
3472 /* fall through - CHECK_STOP and LOAD are not supported yet */
3473 default:
3474 rc = -ENXIO;
3475 }
3476
Christoffer Dalle83dff52017-12-04 21:35:31 +01003477 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003478 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003479}
3480
David Hildenbrand8ad35752014-03-14 11:00:21 +01003481static bool ibs_enabled(struct kvm_vcpu *vcpu)
3482{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003483 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003484}
3485
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003486static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3487{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003488retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003489 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003490 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003491 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003492 /*
3493 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003494 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003495 * This ensures that the ipte instruction for this request has
3496 * already finished. We might race against a second unmapper that
3497 * wants to set the blocking bit. Lets just retry the request loop.
	3498	 * wants to set the blocking bit. Let's just retry the request loop.
David Hildenbrand8ad35752014-03-14 11:00:21 +01003499 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003500 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003501 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3502 kvm_s390_get_prefix(vcpu),
3503 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003504 if (rc) {
3505 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003506 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003507 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003508 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003509 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003510
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003511 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3512 vcpu->arch.sie_block->ihcpu = 0xffff;
3513 goto retry;
3514 }
3515
David Hildenbrand8ad35752014-03-14 11:00:21 +01003516 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3517 if (!ibs_enabled(vcpu)) {
3518 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003519 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003520 }
3521 goto retry;
3522 }
3523
3524 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3525 if (ibs_enabled(vcpu)) {
3526 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003527 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003528 }
3529 goto retry;
3530 }
3531
David Hildenbrand6502a342016-06-21 14:19:51 +02003532 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3533 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3534 goto retry;
3535 }
3536
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003537 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3538 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003539 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003540	 * instruction manually in order to provide the additional
	3541	 * functionality needed for live migration.
3542 */
3543 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3544 goto retry;
3545 }
3546
3547 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3548 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003549 * Re-enable CMM virtualization if CMMA is available and
3550 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003551 */
3552 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003553 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003554 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3555 goto retry;
3556 }
3557
David Hildenbrand0759d062014-05-13 16:54:32 +02003558 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003559 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003560 /* we left the vsie handler, nothing to do, just clear the request */
3561 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003562
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003563 return 0;
3564}
3565
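/*
 * The epoch is the offset that SIE adds to the host TOD to produce the
 * guest TOD, so it is computed below as gtod->tod - htod.tod (modulo 2^64).
 * With the multiple-epoch facility (139) the epoch index extends the TOD on
 * the left; if the 64-bit subtraction borrows (kvm->arch.epoch ends up
 * larger than gtod->tod), one is subtracted from the epoch index difference
 * to propagate the borrow.
 */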
David Hildenbrand0e7def52018-02-07 12:46:43 +01003566void kvm_s390_set_tod_clock(struct kvm *kvm,
3567 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003568{
3569 struct kvm_vcpu *vcpu;
3570 struct kvm_s390_tod_clock_ext htod;
3571 int i;
3572
3573 mutex_lock(&kvm->lock);
3574 preempt_disable();
3575
3576 get_tod_clock_ext((char *)&htod);
3577
3578 kvm->arch.epoch = gtod->tod - htod.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003579 kvm->arch.epdx = 0;
3580 if (test_kvm_facility(kvm, 139)) {
3581 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3582 if (kvm->arch.epoch > gtod->tod)
3583 kvm->arch.epdx -= 1;
3584 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003585
3586 kvm_s390_vcpu_block_all(kvm);
3587 kvm_for_each_vcpu(i, vcpu, kvm) {
3588 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3589 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3590 }
3591
3592 kvm_s390_vcpu_unblock_all(kvm);
3593 preempt_enable();
3594 mutex_unlock(&kvm->lock);
3595}
3596
Thomas Huthfa576c52014-05-06 17:20:16 +02003597/**
3598 * kvm_arch_fault_in_page - fault-in guest page if necessary
3599 * @vcpu: The corresponding virtual cpu
3600 * @gpa: Guest physical address
3601 * @writable: Whether the page should be writable or not
3602 *
3603 * Make sure that a guest page has been faulted-in on the host.
3604 *
3605 * Return: Zero on success, negative error code otherwise.
3606 */
3607long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003608{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003609 return gmap_fault(vcpu->arch.gmap, gpa,
3610 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003611}
3612
Dominik Dingel3c038e62013-10-07 17:11:48 +02003613static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3614 unsigned long token)
3615{
3616 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003617 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003618
3619 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003620 irq.u.ext.ext_params2 = token;
3621 irq.type = KVM_S390_INT_PFAULT_INIT;
3622 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003623 } else {
3624 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003625 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003626 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3627 }
3628}
3629
3630void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3631 struct kvm_async_pf *work)
3632{
3633 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3634 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3635}
3636
3637void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3638 struct kvm_async_pf *work)
3639{
3640 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3641 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3642}
3643
3644void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3645 struct kvm_async_pf *work)
3646{
3647 /* s390 will always inject the page directly */
3648}
3649
3650bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3651{
3652 /*
3653 * s390 will always inject the page directly,
3654	 * but we still want kvm_check_async_pf_completion to clean up
3655 */
3656 return true;
3657}
3658
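/*
 * Queue an async pfault for the current host fault if the guest allows it:
 * a pfault token must be registered, the PSW mask must match the configured
 * select/compare values, external interrupts and the service-signal
 * subclass must be enabled, no interrupt may be pending and pfault handling
 * must be enabled on the gmap. A return value of 0 means the fault still
 * has to be resolved synchronously.
 */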
3659static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3660{
3661 hva_t hva;
3662 struct kvm_arch_async_pf arch;
3663 int rc;
3664
3665 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3666 return 0;
3667 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3668 vcpu->arch.pfault_compare)
3669 return 0;
3670 if (psw_extint_disabled(vcpu))
3671 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02003672 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003673 return 0;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02003674 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003675 return 0;
3676 if (!vcpu->arch.gmap->pfault_enabled)
3677 return 0;
3678
Heiko Carstens81480cc2014-01-01 16:36:07 +01003679 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3680 hva += current->thread.gmap_addr & ~PAGE_MASK;
3681 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003682 return 0;
3683
3684 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3685 return rc;
3686}
3687
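/*
 * vcpu_pre_run() does the per-iteration work before (re-)entering SIE:
 * async-pf completion housekeeping, pending host machine checks, interrupt
 * delivery, request handling and guestdbg setup. A non-zero return value
 * terminates the run loop.
 */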
Thomas Huth3fb4c402013-09-12 10:33:43 +02003688static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003689{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003690 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003691
Dominik Dingel3c038e62013-10-07 17:11:48 +02003692 /*
3693 * On s390 notifications for arriving pages will be delivered directly
3694	 * to the guest, but the housekeeping for completed pfaults is
3695 * handled outside the worker.
3696 */
3697 kvm_check_async_pf_completion(vcpu);
3698
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003699 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3700 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003701
3702 if (need_resched())
3703 schedule();
3704
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02003705 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02003706 s390_handle_mcck();
3707
Jens Freimann79395032014-04-17 10:10:30 +02003708 if (!kvm_is_ucontrol(vcpu->kvm)) {
3709 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3710 if (rc)
3711 return rc;
3712 }
Carsten Otte0ff31862008-05-21 13:37:37 +02003713
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003714 rc = kvm_s390_handle_requests(vcpu);
3715 if (rc)
3716 return rc;
3717
David Hildenbrand27291e22014-01-23 12:26:52 +01003718 if (guestdbg_enabled(vcpu)) {
3719 kvm_s390_backup_guest_per_regs(vcpu);
3720 kvm_s390_patch_guest_per_regs(vcpu);
3721 }
3722
Michael Mueller9f30f622019-01-31 09:52:44 +01003723 clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
3724
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003725 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003726 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3727 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3728 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003729
Thomas Huth3fb4c402013-09-12 10:33:43 +02003730 return 0;
3731}
3732
Thomas Huth492d8642015-02-10 16:11:01 +01003733static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3734{
David Hildenbrand56317922016-01-12 17:37:58 +01003735 struct kvm_s390_pgm_info pgm_info = {
3736 .code = PGM_ADDRESSING,
3737 };
3738 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01003739 int rc;
3740
3741 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3742 trace_kvm_s390_sie_fault(vcpu);
3743
3744 /*
3745 * We want to inject an addressing exception, which is defined as a
3746 * suppressing or terminating exception. However, since we came here
3747 * by a DAT access exception, the PSW still points to the faulting
3748 * instruction since DAT exceptions are nullifying. So we've got
3749 * to look up the current opcode to get the length of the instruction
3750 * to be able to forward the PSW.
3751 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02003752 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01003753 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01003754 if (rc < 0) {
3755 return rc;
3756 } else if (rc) {
3757 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3758 * Forward by arbitrary ilc, injection will take care of
3759 * nullification if necessary.
3760 */
3761 pgm_info = vcpu->arch.pgm;
3762 ilen = 4;
3763 }
David Hildenbrand56317922016-01-12 17:37:58 +01003764 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3765 kvm_s390_forward_psw(vcpu, ilen);
3766 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01003767}
3768
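/*
 * vcpu_post_run() evaluates the SIE exit: 0 keeps the run loop going,
 * -EREMOTE means kvm_run has been prepared and control must go back to
 * userspace, other errors are passed up. An exit_reason of -EINTR
 * indicates a host machine check that gets reinjected into the guest.
 */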
Thomas Huth3fb4c402013-09-12 10:33:43 +02003769static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3770{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003771 struct mcck_volatile_info *mcck_info;
3772 struct sie_page *sie_page;
3773
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003774 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3775 vcpu->arch.sie_block->icptcode);
3776 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3777
David Hildenbrand27291e22014-01-23 12:26:52 +01003778 if (guestdbg_enabled(vcpu))
3779 kvm_s390_restore_guest_per_regs(vcpu);
3780
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003781 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3782 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003783
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003784 if (exit_reason == -EINTR) {
3785 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3786 sie_page = container_of(vcpu->arch.sie_block,
3787 struct sie_page, sie_block);
3788 mcck_info = &sie_page->mcck_info;
3789 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3790 return 0;
3791 }
3792
David Hildenbrand71f116b2015-10-19 16:24:28 +02003793 if (vcpu->arch.sie_block->icptcode > 0) {
3794 int rc = kvm_handle_sie_intercept(vcpu);
3795
3796 if (rc != -EOPNOTSUPP)
3797 return rc;
3798 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3799 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3800 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3801 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3802 return -EREMOTE;
3803 } else if (exit_reason != -EFAULT) {
3804 vcpu->stat.exit_null++;
3805 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02003806 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3807 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3808 vcpu->run->s390_ucontrol.trans_exc_code =
3809 current->thread.gmap_addr;
3810 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003811 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003812 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02003813 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003814 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003815 if (kvm_arch_setup_async_pf(vcpu))
3816 return 0;
3817 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003818 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02003819 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003820}
3821
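/*
 * __vcpu_run(): the main run loop. Each iteration prepares the vcpu
 * (vcpu_pre_run), drops the srcu lock, enters SIE via sie64a() with
 * interrupts disabled around guest_enter/guest_exit, and then evaluates
 * the exit (vcpu_post_run), until an error, a pending signal or a pending
 * guestdbg exit ends the loop.
 */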
3822static int __vcpu_run(struct kvm_vcpu *vcpu)
3823{
3824 int rc, exit_reason;
3825
Thomas Huth800c1062013-09-12 10:33:45 +02003826 /*
3827 * We try to hold kvm->srcu during most of vcpu_run (except when run-
3828 * ning the guest), so that memslots (and other stuff) are protected
3829 */
3830 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3831
Thomas Hutha76ccff2013-09-12 10:33:44 +02003832 do {
3833 rc = vcpu_pre_run(vcpu);
3834 if (rc)
3835 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003836
Thomas Huth800c1062013-09-12 10:33:45 +02003837 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02003838 /*
3839		 * As PF_VCPU will be used in the fault handler, there must be
3840		 * no uaccess between guest_enter and guest_exit.
3841 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02003842 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003843 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003844 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003845 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003846 exit_reason = sie64a(vcpu->arch.sie_block,
3847 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003848 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003849 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003850 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02003851 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02003852 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003853
Thomas Hutha76ccff2013-09-12 10:33:44 +02003854 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01003855 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003856
Thomas Huth800c1062013-09-12 10:33:45 +02003857 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01003858 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003859}
3860
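/*
 * sync_regs() moves the register state that userspace marked dirty in
 * kvm_run into the vcpu/SIE block, lazily enables RI and GS when valid
 * control blocks are provided, and switches the FPU and guarded-storage
 * context from the host to the guest.
 */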
David Hildenbrandb028ee32014-07-17 10:47:43 +02003861static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3862{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003863 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003864 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003865
3866 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003867 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02003868 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3869 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3870 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3871 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3872 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3873 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003874 /* some control register changes require a tlb flush */
3875 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003876 }
3877 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01003878 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003879 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3880 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3881 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3882 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3883 }
3884 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3885 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3886 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3887 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003888 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3889 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003890 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003891 /*
3892 * If userspace sets the riccb (e.g. after migration) to a valid state,
3893 * we should enable RI here instead of doing the lazy enablement.
3894 */
3895 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003896 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02003897 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003898 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003899 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003900 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02003901 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003902 /*
3903 * If userspace sets the gscb (e.g. after migration) to non-zero,
3904 * we should enable GS here instead of doing the lazy enablement.
3905 */
3906 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3907 test_kvm_facility(vcpu->kvm, 133) &&
3908 gscb->gssm &&
3909 !vcpu->arch.gs_enabled) {
3910 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3911 vcpu->arch.sie_block->ecb |= ECB_GS;
3912 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3913 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02003914 }
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003915 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
3916 test_kvm_facility(vcpu->kvm, 82)) {
3917 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3918 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
3919 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003920 save_access_regs(vcpu->arch.host_acrs);
3921 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003922 /* save host (userspace) fprs/vrs */
3923 save_fpu_regs();
3924 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3925 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3926 if (MACHINE_HAS_VX)
3927 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3928 else
3929 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3930 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3931 if (test_fp_ctl(current->thread.fpu.fpc))
3932 /* User space provided an invalid FPC, let's clear it */
3933 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003934 if (MACHINE_HAS_GS) {
3935 preempt_disable();
3936 __ctl_set_bit(2, 4);
3937 if (current->thread.gs_cb) {
3938 vcpu->arch.host_gscb = current->thread.gs_cb;
3939 save_gs_cb(vcpu->arch.host_gscb);
3940 }
3941 if (vcpu->arch.gs_enabled) {
3942 current->thread.gs_cb = (struct gs_cb *)
3943 &vcpu->run->s.regs.gscb;
3944 restore_gs_cb(current->thread.gs_cb);
3945 }
3946 preempt_enable();
3947 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003948 /* SIE will load etoken directly from SDNX and therefore kvm_run */
Fan Zhang80cd8762016-08-15 04:53:22 +02003949
David Hildenbrandb028ee32014-07-17 10:47:43 +02003950 kvm_run->kvm_dirty_regs = 0;
3951}
3952
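/*
 * store_regs() is the counterpart of sync_regs(): it copies the guest
 * register state back into kvm_run and restores the host (userspace)
 * access-register, FPU and guarded-storage context.
 */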
3953static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3954{
3955 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3956 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3957 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3958 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003959 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003960 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3961 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3962 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3963 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3964 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3965 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3966 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01003967 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003968 save_access_regs(vcpu->run->s.regs.acrs);
3969 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003970 /* Save guest register state */
3971 save_fpu_regs();
3972 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3973 /* Restore will be done lazily at return */
3974 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3975 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003976 if (MACHINE_HAS_GS) {
3977 __ctl_set_bit(2, 4);
3978 if (vcpu->arch.gs_enabled)
3979 save_gs_cb(current->thread.gs_cb);
3980 preempt_disable();
3981 current->thread.gs_cb = vcpu->arch.host_gscb;
3982 restore_gs_cb(vcpu->arch.host_gscb);
3983 preempt_enable();
3984 if (!vcpu->arch.host_gscb)
3985 __ctl_clear_bit(2, 4);
3986 vcpu->arch.host_gscb = NULL;
3987 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003988 /* SIE will save etoken directly into SDNX and therefore kvm_run */
David Hildenbrandb028ee32014-07-17 10:47:43 +02003989}
3990
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003991int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3992{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003993 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003994
Paolo Bonzini460df4c2017-02-08 11:50:15 +01003995 if (kvm_run->immediate_exit)
3996 return -EINTR;
3997
Thomas Huth200824f2019-09-04 10:51:59 +02003998 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
3999 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4000 return -EINVAL;
4001
Christoffer Dallaccb7572017-12-04 21:35:25 +01004002 vcpu_load(vcpu);
4003
David Hildenbrand27291e22014-01-23 12:26:52 +01004004 if (guestdbg_exit_pending(vcpu)) {
4005 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004006 rc = 0;
4007 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01004008 }
4009
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004010 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004011
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004012 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4013 kvm_s390_vcpu_start(vcpu);
4014 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004015 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004016 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004017 rc = -EINVAL;
4018 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004019 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004020
David Hildenbrandb028ee32014-07-17 10:47:43 +02004021 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01004022 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004023
Heiko Carstensdab4079d2009-06-12 10:26:32 +02004024 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004025 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02004026
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004027 if (signal_pending(current) && !rc) {
4028 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004029 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004030 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004031
David Hildenbrand27291e22014-01-23 12:26:52 +01004032 if (guestdbg_exit_pending(vcpu) && !rc) {
4033 kvm_s390_prepare_debug_exit(vcpu);
4034 rc = 0;
4035 }
4036
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004037 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02004038 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004039 rc = 0;
4040 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004041
David Hildenbranddb0758b2016-02-15 09:42:25 +01004042 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004043 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004044
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004045 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004046
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004047 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01004048out:
4049 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02004050 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004051}
4052
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004053/*
4054 * store status at address
4054 * we have two special cases:
4056 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4057 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4058 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01004059int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004060{
Carsten Otte092670c2011-07-24 10:48:22 +02004061 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004062 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02004063 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01004064 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004065 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004066
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004067 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01004068 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4069 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004070 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004071 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004072 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4073 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004074 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004075 gpa = px;
4076 } else
4077 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004078
4079 /* manually convert vector registers if necessary */
4080 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01004081 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004082 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4083 fprs, 128);
4084 } else {
4085 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01004086 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004087 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004088 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004089 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004090 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004091 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004092 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02004093 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004094 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004095 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004096 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004097 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01004098 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004099 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01004100 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01004101 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004102 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004103 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004104 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004105 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004106 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004107 &vcpu->arch.sie_block->gcr, 128);
4108 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004109}
4110
Thomas Huthe8798922013-11-06 15:46:33 +01004111int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4112{
4113 /*
4114 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01004115 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01004116	 * them into the save area.
4117 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02004118 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004119 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01004120 save_access_regs(vcpu->run->s.regs.acrs);
4121
4122 return kvm_s390_store_status_unloaded(vcpu, addr);
4123}
4124
David Hildenbrand8ad35752014-03-14 11:00:21 +01004125static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4126{
4127 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004128 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004129}
4130
4131static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
4132{
4133 unsigned int i;
4134 struct kvm_vcpu *vcpu;
4135
4136 kvm_for_each_vcpu(i, vcpu, kvm) {
4137 __disable_ibs_on_vcpu(vcpu);
4138 }
4139}
4140
4141static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4142{
David Hildenbrand09a400e2016-04-04 15:57:08 +02004143 if (!sclp.has_ibs)
4144 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004145 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004146 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004147}
4148
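/*
 * Starting and stopping VCPUs also manages the IBS facility: IBS is only
 * kept enabled while exactly one VCPU is running (to speed it up) and is
 * disabled on all VCPUs as soon as a second one starts. The
 * start_stop_lock serializes these transitions.
 */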
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004149void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
4150{
David Hildenbrand8ad35752014-03-14 11:00:21 +01004151 int i, online_vcpus, started_vcpus = 0;
4152
4153 if (!is_vcpu_stopped(vcpu))
4154 return;
4155
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004156 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004157 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004158 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004159 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4160
4161 for (i = 0; i < online_vcpus; i++) {
4162 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
4163 started_vcpus++;
4164 }
4165
4166 if (started_vcpus == 0) {
4167 /* we're the only active VCPU -> speed it up */
4168 __enable_ibs_on_vcpu(vcpu);
4169 } else if (started_vcpus == 1) {
4170 /*
4171 * As we are starting a second VCPU, we have to disable
4172 * the IBS facility on all VCPUs to remove potentially
4173		 * outstanding ENABLE requests.
4174 */
4175 __disable_ibs_on_all_vcpus(vcpu->kvm);
4176 }
4177
David Hildenbrand9daecfc2018-01-23 18:05:30 +01004178 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004179 /*
4180 * Another VCPU might have used IBS while we were offline.
4181 * Let's play safe and flush the VCPU at startup.
4182 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02004183 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004184 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004185 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004186}
4187
4188void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
4189{
David Hildenbrand8ad35752014-03-14 11:00:21 +01004190 int i, online_vcpus, started_vcpus = 0;
4191 struct kvm_vcpu *started_vcpu = NULL;
4192
4193 if (is_vcpu_stopped(vcpu))
4194 return;
4195
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004196 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004197 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004198 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004199 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4200
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004201 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02004202 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004203
David Hildenbrandef8f4f42018-01-23 18:05:29 +01004204 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004205 __disable_ibs_on_vcpu(vcpu);
4206
4207 for (i = 0; i < online_vcpus; i++) {
4208 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
4209 started_vcpus++;
4210 started_vcpu = vcpu->kvm->vcpus[i];
4211 }
4212 }
4213
4214 if (started_vcpus == 1) {
4215 /*
4216 * As we only have one VCPU left, we want to enable the
4217 * IBS facility for that VCPU to speed it up.
4218 */
4219 __enable_ibs_on_vcpu(started_vcpu);
4220 }
4221
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004222 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004223 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004224}
4225
Cornelia Huckd6712df2012-12-20 15:32:11 +01004226static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4227 struct kvm_enable_cap *cap)
4228{
4229 int r;
4230
4231 if (cap->flags)
4232 return -EINVAL;
4233
4234 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004235 case KVM_CAP_S390_CSS_SUPPORT:
4236 if (!vcpu->kvm->arch.css_support) {
4237 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02004238 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004239 trace_kvm_s390_enable_css(vcpu->kvm);
4240 }
4241 r = 0;
4242 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01004243 default:
4244 r = -EINVAL;
4245 break;
4246 }
4247 return r;
4248}
4249
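/*
 * Handler for the KVM_S390_MEM_OP vcpu ioctl: read or write guest logical
 * memory on behalf of userspace, optionally only checking accessibility
 * (KVM_S390_MEMOP_F_CHECK_ONLY) and optionally injecting the resulting
 * program exception (KVM_S390_MEMOP_F_INJECT_EXCEPTION).
 * A rough userspace sketch (illustrative only, names and error handling
 * are placeholders):
 *
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = guest_addr,
 *		.buf   = (__u64)(unsigned long)local_buf,
 *		.size  = len,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.ar    = 0,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
 */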
Thomas Huth41408c282015-02-06 15:01:21 +01004250static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
4251 struct kvm_s390_mem_op *mop)
4252{
4253 void __user *uaddr = (void __user *)mop->buf;
4254 void *tmpbuf = NULL;
4255 int r, srcu_idx;
4256 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
4257 | KVM_S390_MEMOP_F_CHECK_ONLY;
4258
Thomas Hutha13b03b2019-08-29 14:25:17 +02004259 if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
Thomas Huth41408c282015-02-06 15:01:21 +01004260 return -EINVAL;
4261
4262 if (mop->size > MEM_OP_MAX_SIZE)
4263 return -E2BIG;
4264
4265 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
4266 tmpbuf = vmalloc(mop->size);
4267 if (!tmpbuf)
4268 return -ENOMEM;
4269 }
4270
4271 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4272
4273 switch (mop->op) {
4274 case KVM_S390_MEMOP_LOGICAL_READ:
4275 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004276 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4277 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01004278 break;
4279 }
4280 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4281 if (r == 0) {
4282 if (copy_to_user(uaddr, tmpbuf, mop->size))
4283 r = -EFAULT;
4284 }
4285 break;
4286 case KVM_S390_MEMOP_LOGICAL_WRITE:
4287 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004288 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4289 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01004290 break;
4291 }
4292 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
4293 r = -EFAULT;
4294 break;
4295 }
4296 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4297 break;
4298 default:
4299 r = -EINVAL;
4300 }
4301
4302 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
4303
4304 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
4305 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
4306
4307 vfree(tmpbuf);
4308 return r;
4309}
4310
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004311long kvm_arch_vcpu_async_ioctl(struct file *filp,
4312 unsigned int ioctl, unsigned long arg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004313{
4314 struct kvm_vcpu *vcpu = filp->private_data;
4315 void __user *argp = (void __user *)arg;
4316
Avi Kivity93736622010-05-13 12:35:17 +03004317 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01004318 case KVM_S390_IRQ: {
4319 struct kvm_s390_irq s390irq;
4320
Jens Freimann47b43c52014-11-11 20:57:06 +01004321 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004322 return -EFAULT;
4323 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Jens Freimann47b43c52014-11-11 20:57:06 +01004324 }
Avi Kivity93736622010-05-13 12:35:17 +03004325 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01004326 struct kvm_s390_interrupt s390int;
Thomas Huth53936b52019-09-12 13:54:38 +02004327 struct kvm_s390_irq s390irq = {};
Carsten Otteba5c1e92008-03-25 18:47:26 +01004328
4329 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004330 return -EFAULT;
Jens Freimann383d0b02014-07-29 15:11:49 +02004331 if (s390int_to_s390irq(&s390int, &s390irq))
4332 return -EINVAL;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004333 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Carsten Otteba5c1e92008-03-25 18:47:26 +01004334 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004335 }
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004336 return -ENOIOCTLCMD;
4337}
4338
4339long kvm_arch_vcpu_ioctl(struct file *filp,
4340 unsigned int ioctl, unsigned long arg)
4341{
4342 struct kvm_vcpu *vcpu = filp->private_data;
4343 void __user *argp = (void __user *)arg;
4344 int idx;
4345 long r;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004346
4347 vcpu_load(vcpu);
4348
4349 switch (ioctl) {
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004350 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02004351 idx = srcu_read_lock(&vcpu->kvm->srcu);
Christian Borntraeger55680892020-01-31 05:02:00 -05004352 r = kvm_s390_store_status_unloaded(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02004353 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004354 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004355 case KVM_S390_SET_INITIAL_PSW: {
4356 psw_t psw;
4357
Avi Kivitybc923cc2010-05-13 12:21:46 +03004358 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004359 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03004360 break;
4361 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
4362 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004363 }
Janosch Frank7de3f142020-01-31 05:02:02 -05004364 case KVM_S390_CLEAR_RESET:
4365 r = 0;
4366 kvm_arch_vcpu_ioctl_clear_reset(vcpu);
4367 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004368 case KVM_S390_INITIAL_RESET:
Janosch Frank7de3f142020-01-31 05:02:02 -05004369 r = 0;
4370 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4371 break;
4372 case KVM_S390_NORMAL_RESET:
4373 r = 0;
4374 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004375 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02004376 case KVM_SET_ONE_REG:
4377 case KVM_GET_ONE_REG: {
4378 struct kvm_one_reg reg;
4379 r = -EFAULT;
4380 if (copy_from_user(&reg, argp, sizeof(reg)))
4381 break;
4382 if (ioctl == KVM_SET_ONE_REG)
4383 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
4384 else
4385 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
4386 break;
4387 }
Carsten Otte27e03932012-01-04 10:25:21 +01004388#ifdef CONFIG_KVM_S390_UCONTROL
4389 case KVM_S390_UCAS_MAP: {
4390 struct kvm_s390_ucas_mapping ucasmap;
4391
4392 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4393 r = -EFAULT;
4394 break;
4395 }
4396
4397 if (!kvm_is_ucontrol(vcpu->kvm)) {
4398 r = -EINVAL;
4399 break;
4400 }
4401
4402 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
4403 ucasmap.vcpu_addr, ucasmap.length);
4404 break;
4405 }
4406 case KVM_S390_UCAS_UNMAP: {
4407 struct kvm_s390_ucas_mapping ucasmap;
4408
4409 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4410 r = -EFAULT;
4411 break;
4412 }
4413
4414 if (!kvm_is_ucontrol(vcpu->kvm)) {
4415 r = -EINVAL;
4416 break;
4417 }
4418
4419 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
4420 ucasmap.length);
4421 break;
4422 }
4423#endif
Carsten Otteccc79102012-01-04 10:25:26 +01004424 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02004425 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01004426 break;
4427 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01004428 case KVM_ENABLE_CAP:
4429 {
4430 struct kvm_enable_cap cap;
4431 r = -EFAULT;
4432 if (copy_from_user(&cap, argp, sizeof(cap)))
4433 break;
4434 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
4435 break;
4436 }
Thomas Huth41408c282015-02-06 15:01:21 +01004437 case KVM_S390_MEM_OP: {
4438 struct kvm_s390_mem_op mem_op;
4439
4440 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
4441 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
4442 else
4443 r = -EFAULT;
4444 break;
4445 }
Jens Freimann816c7662014-11-24 17:13:46 +01004446 case KVM_S390_SET_IRQ_STATE: {
4447 struct kvm_s390_irq_state irq_state;
4448
4449 r = -EFAULT;
4450 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4451 break;
4452 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
4453 irq_state.len == 0 ||
4454 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
4455 r = -EINVAL;
4456 break;
4457 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004458 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004459 r = kvm_s390_set_irq_state(vcpu,
4460 (void __user *) irq_state.buf,
4461 irq_state.len);
4462 break;
4463 }
4464 case KVM_S390_GET_IRQ_STATE: {
4465 struct kvm_s390_irq_state irq_state;
4466
4467 r = -EFAULT;
4468 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4469 break;
4470 if (irq_state.len == 0) {
4471 r = -EINVAL;
4472 break;
4473 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004474 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004475 r = kvm_s390_get_irq_state(vcpu,
4476 (__u8 __user *) irq_state.buf,
4477 irq_state.len);
4478 break;
4479 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004480 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01004481 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004482 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004483
4484 vcpu_put(vcpu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004485 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004486}
4487
Souptick Joarder1499fa82018-04-19 00:49:58 +05304488vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
Carsten Otte5b1c1492012-01-04 10:25:23 +01004489{
4490#ifdef CONFIG_KVM_S390_UCONTROL
4491 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
4492 && (kvm_is_ucontrol(vcpu->kvm))) {
4493 vmf->page = virt_to_page(vcpu->arch.sie_block);
4494 get_page(vmf->page);
4495 return 0;
4496 }
4497#endif
4498 return VM_FAULT_SIGBUS;
4499}
4500
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004501/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004502int kvm_arch_prepare_memory_region(struct kvm *kvm,
4503 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004504 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09004505 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004506{
Nick Wangdd2887e2013-03-25 17:22:57 +01004507	/* A few sanity checks. Memory slots have to start and end on a
4508	   segment boundary (1 MB). The memory in userland may be fragmented
4509	   into various different vmas. It is okay to mmap() and munmap()
4510	   memory in this slot after this call, at any time. */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004511
Carsten Otte598841c2011-07-24 10:48:21 +02004512 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004513 return -EINVAL;
4514
Carsten Otte598841c2011-07-24 10:48:21 +02004515 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004516 return -EINVAL;
4517
Dominik Dingela3a92c32014-12-01 17:24:42 +01004518 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
4519 return -EINVAL;
4520
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004521 return 0;
4522}
4523
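/*
 * Propagate memslot changes to the gmap: DELETE and MOVE unmap the old
 * range, MOVE and CREATE map the new userspace range at the guest physical
 * address, FLAGS_ONLY needs no gmap update.
 */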
4524void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004525 const struct kvm_userspace_memory_region *mem,
Sean Christopherson9d4c1972020-02-18 13:07:24 -08004526 struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02004527 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004528 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004529{
Christian Borntraeger19ec1662019-05-24 16:06:23 +02004530 int rc = 0;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004531
Christian Borntraeger19ec1662019-05-24 16:06:23 +02004532 switch (change) {
4533 case KVM_MR_DELETE:
4534 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4535 old->npages * PAGE_SIZE);
4536 break;
4537 case KVM_MR_MOVE:
4538 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4539 old->npages * PAGE_SIZE);
4540 if (rc)
4541 break;
4542 /* FALLTHROUGH */
4543 case KVM_MR_CREATE:
4544 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
4545 mem->guest_phys_addr, mem->memory_size);
4546 break;
4547 case KVM_MR_FLAGS_ONLY:
4548 break;
4549 default:
4550 WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
4551 }
Carsten Otte598841c2011-07-24 10:48:21 +02004552 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004553 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02004554 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004555}
4556
Alexander Yarygin60a37702016-04-01 15:38:57 +03004557static inline unsigned long nonhyp_mask(int i)
4558{
4559 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
4560
4561 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
4562}
4563
Christian Borntraeger3491caf2016-05-13 12:16:35 +02004564void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
4565{
4566 vcpu->valid_wakeup = false;
4567}
4568
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004569static int __init kvm_s390_init(void)
4570{
Alexander Yarygin60a37702016-04-01 15:38:57 +03004571 int i;
4572
David Hildenbrand07197fd2015-01-30 16:01:38 +01004573 if (!sclp.has_sief2) {
Michael Mueller8d43d572018-12-10 11:15:16 +01004574 pr_info("SIE is not available\n");
David Hildenbrand07197fd2015-01-30 16:01:38 +01004575 return -ENODEV;
4576 }
4577
Janosch Franka4499382018-07-13 11:28:31 +01004578 if (nested && hpage) {
Michael Mueller8d43d572018-12-10 11:15:16 +01004579 pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
Janosch Franka4499382018-07-13 11:28:31 +01004580 return -EINVAL;
4581 }
4582
Alexander Yarygin60a37702016-04-01 15:38:57 +03004583 for (i = 0; i < 16; i++)
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00004584 kvm_s390_fac_base[i] |=
Alexander Yarygin60a37702016-04-01 15:38:57 +03004585 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
4586
Michael Mueller9d8d5782015-02-02 15:42:51 +01004587 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004588}
4589
4590static void __exit kvm_s390_exit(void)
4591{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004592 kvm_exit();
4593}
4594
4595module_init(kvm_s390_init);
4596module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02004597
4598/*
4599 * Enable autoloading of the kvm module.
4600 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
4601 * since x86 takes a different approach.
4602 */
4603#include <linux/miscdevice.h>
4604MODULE_ALIAS_MISCDEV(KVM_MINOR);
4605MODULE_ALIAS("devname:kvm");