// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))
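
/*
 * Sizing sketch (assumption, not stated in the code): VCPU_IRQS_MAX_BUF
 * bounds the userspace buffer for the KVM_S390_{GET,SET}_IRQ_STATE vcpu
 * ioctls. Besides the LOCAL_IRQS "singleton" local interrupt types, a vcpu
 * may have one emergency signal pending per possible source vcpu, hence
 * the KVM_MAX_VCPUS term.
 */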

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio)
};
static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
		sizeof(struct kvm_vm_stat) / sizeof(u64));

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("userspace_handled", exit_userspace),
	VCPU_STAT("exit_null", exit_null),
	VCPU_STAT("pfault_sync", pfault_sync),
	VCPU_STAT("exit_validity", exit_validity),
	VCPU_STAT("exit_stop_request", exit_stop_request),
	VCPU_STAT("exit_external_request", exit_external_request),
	VCPU_STAT("exit_io_request", exit_io_request),
	VCPU_STAT("exit_external_interrupt", exit_external_interrupt),
	VCPU_STAT("exit_instruction", exit_instruction),
	VCPU_STAT("exit_pei", exit_pei),
	VCPU_STAT("exit_program_interruption", exit_program_interruption),
	VCPU_STAT("exit_instr_and_program_int", exit_instr_and_program),
	VCPU_STAT("exit_operation_exception", exit_operation_exception),
	VCPU_STAT("halt_successful_poll", generic.halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", generic.halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", generic.halt_poll_invalid),
	VCPU_STAT("halt_no_poll_steal", halt_no_poll_steal),
	VCPU_STAT("halt_wakeup", generic.halt_wakeup),
	VCPU_STAT("halt_poll_success_ns", generic.halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", generic.halt_poll_fail_ns),
	VCPU_STAT("instruction_lctlg", instruction_lctlg),
	VCPU_STAT("instruction_lctl", instruction_lctl),
	VCPU_STAT("instruction_stctl", instruction_stctl),
	VCPU_STAT("instruction_stctg", instruction_stctg),
	VCPU_STAT("deliver_ckc", deliver_ckc),
	VCPU_STAT("deliver_cputm", deliver_cputm),
	VCPU_STAT("deliver_emergency_signal", deliver_emergency_signal),
	VCPU_STAT("deliver_external_call", deliver_external_call),
	VCPU_STAT("deliver_service_signal", deliver_service_signal),
	VCPU_STAT("deliver_virtio", deliver_virtio),
	VCPU_STAT("deliver_stop_signal", deliver_stop_signal),
	VCPU_STAT("deliver_prefix_signal", deliver_prefix_signal),
	VCPU_STAT("deliver_restart_signal", deliver_restart_signal),
	VCPU_STAT("deliver_program", deliver_program),
	VCPU_STAT("deliver_io", deliver_io),
	VCPU_STAT("deliver_machine_check", deliver_machine_check),
	VCPU_STAT("exit_wait_state", exit_wait_state),
	VCPU_STAT("inject_ckc", inject_ckc),
	VCPU_STAT("inject_cputm", inject_cputm),
	VCPU_STAT("inject_external_call", inject_external_call),
	VM_STAT("inject_float_mchk", inject_float_mchk),
	VCPU_STAT("inject_emergency_signal", inject_emergency_signal),
	VM_STAT("inject_io", inject_io),
	VCPU_STAT("inject_mchk", inject_mchk),
	VM_STAT("inject_pfault_done", inject_pfault_done),
	VCPU_STAT("inject_program", inject_program),
	VCPU_STAT("inject_restart", inject_restart),
	VM_STAT("inject_service_signal", inject_service_signal),
	VCPU_STAT("inject_set_prefix", inject_set_prefix),
	VCPU_STAT("inject_stop_signal", inject_stop_signal),
	VCPU_STAT("inject_pfault_init", inject_pfault_init),
	VM_STAT("inject_virtio", inject_virtio),
	VCPU_STAT("instruction_epsw", instruction_epsw),
	VCPU_STAT("instruction_gs", instruction_gs),
	VCPU_STAT("instruction_io_other", instruction_io_other),
	VCPU_STAT("instruction_lpsw", instruction_lpsw),
	VCPU_STAT("instruction_lpswe", instruction_lpswe),
	VCPU_STAT("instruction_pfmf", instruction_pfmf),
	VCPU_STAT("instruction_ptff", instruction_ptff),
	VCPU_STAT("instruction_stidp", instruction_stidp),
	VCPU_STAT("instruction_sck", instruction_sck),
	VCPU_STAT("instruction_sckpf", instruction_sckpf),
	VCPU_STAT("instruction_spx", instruction_spx),
	VCPU_STAT("instruction_stpx", instruction_stpx),
	VCPU_STAT("instruction_stap", instruction_stap),
	VCPU_STAT("instruction_iske", instruction_iske),
	VCPU_STAT("instruction_ri", instruction_ri),
	VCPU_STAT("instruction_rrbe", instruction_rrbe),
	VCPU_STAT("instruction_sske", instruction_sske),
	VCPU_STAT("instruction_ipte_interlock", instruction_ipte_interlock),
	VCPU_STAT("instruction_essa", instruction_essa),
	VCPU_STAT("instruction_stsi", instruction_stsi),
	VCPU_STAT("instruction_stfl", instruction_stfl),
	VCPU_STAT("instruction_tb", instruction_tb),
	VCPU_STAT("instruction_tpi", instruction_tpi),
	VCPU_STAT("instruction_tprot", instruction_tprot),
	VCPU_STAT("instruction_tsch", instruction_tsch),
	VCPU_STAT("instruction_sthyi", instruction_sthyi),
	VCPU_STAT("instruction_sie", instruction_sie),
	VCPU_STAT("instruction_sigp_sense", instruction_sigp_sense),
	VCPU_STAT("instruction_sigp_sense_running", instruction_sigp_sense_running),
	VCPU_STAT("instruction_sigp_external_call", instruction_sigp_external_call),
	VCPU_STAT("instruction_sigp_emergency", instruction_sigp_emergency),
	VCPU_STAT("instruction_sigp_cond_emergency", instruction_sigp_cond_emergency),
	VCPU_STAT("instruction_sigp_start", instruction_sigp_start),
	VCPU_STAT("instruction_sigp_stop", instruction_sigp_stop),
	VCPU_STAT("instruction_sigp_stop_store_status", instruction_sigp_stop_store_status),
	VCPU_STAT("instruction_sigp_store_status", instruction_sigp_store_status),
	VCPU_STAT("instruction_sigp_store_adtl_status", instruction_sigp_store_adtl_status),
	VCPU_STAT("instruction_sigp_set_arch", instruction_sigp_arch),
	VCPU_STAT("instruction_sigp_set_prefix", instruction_sigp_prefix),
	VCPU_STAT("instruction_sigp_restart", instruction_sigp_restart),
	VCPU_STAT("instruction_sigp_cpu_reset", instruction_sigp_cpu_reset),
	VCPU_STAT("instruction_sigp_init_cpu_reset", instruction_sigp_init_cpu_reset),
	VCPU_STAT("instruction_sigp_unknown", instruction_sigp_unknown),
	VCPU_STAT("instruction_diag_10", diagnose_10),
	VCPU_STAT("instruction_diag_44", diagnose_44),
	VCPU_STAT("instruction_diag_9c", diagnose_9c),
	VCPU_STAT("diag_9c_ignored", diagnose_9c_ignored),
	VCPU_STAT("diag_9c_forward", diagnose_9c_forward),
	VCPU_STAT("instruction_diag_258", diagnose_258),
	VCPU_STAT("instruction_diag_308", diagnose_308),
	VCPU_STAT("instruction_diag_500", diagnose_500),
	VCPU_STAT("instruction_diag_other", diagnose_other),
	{ NULL }
};
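
/*
 * Note (assumption): these debugfs entries report the same counters as the
 * binary stats interface described by kvm_vm_stats_desc above; the two
 * views are expected to coexist while the older debugfs path is phased out.
 */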

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling. >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16
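/* 16 double words * 64 bits = facility bits 0..1023, as reported by stfle */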

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
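
/*
 * Worked example (illustrative, not part of the original source): if the
 * host TOD is stepped forward, delta > 0, so -delta is negative and
 * delta_idx = -1 sign-extends it into the epoch index. The
 * "scb->epoch < delta" comparison after the addition detects an unsigned
 * carry out of the low 64 bits, which is then propagated into scb->epdx so
 * the combined (epdx:epoch) value stays consistent when the multiple-epoch
 * facility (ECD_MEF) is in use.
 */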

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void *opaque)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}
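
/*
 * Usage sketch: with bit 0x100 set in general register 0, PERFORM LOCKED
 * OPERATION acts as a "test bit" query and sets condition code 0 when the
 * function code nr is installed; kvm_s390_cpu_feat_init() below probes all
 * 256 function codes this way to fill the PLO subfunction block.
 */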

static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc", "memory");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto out;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	default:
		r = 0;
	}
	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}
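
/*
 * The loop above advances in _PAGE_ENTRIES (256) page steps, i.e. one 1 MB
 * segment at a time, so each gmap_sync_dirty_log_pmd() call can collect the
 * dirty bits for a whole segment (from the PGSTEs, or from the pmd when the
 * segment is mapped by a huge page) into the local bitmap.
 */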

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}
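
/*
 * Flow sketch (assumption based on the code above): with migration mode
 * enabled and CMMA in use, userspace drains the storage attributes via
 * KVM_S390_GET_CMMA_BITS; cmma_dirty_pages, initialized above to the total
 * number of pages, tracks how many pages still have a set bit in the
 * repurposed second bitmap half.
 */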
1070
1071/*
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001072 * Must be called with kvm->slots_lock to avoid races with ourselves and
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001073 * kvm_s390_vm_start_migration.
1074 */
1075static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1076{
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001077 /* migration mode already disabled */
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001078 if (!kvm->arch.migration_mode)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001079 return 0;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001080 kvm->arch.migration_mode = 0;
1081 if (kvm->arch.use_cmma)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001082 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001083 return 0;
1084}
1085
1086static int kvm_s390_vm_set_migration(struct kvm *kvm,
1087 struct kvm_device_attr *attr)
1088{
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001089 int res = -ENXIO;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001090
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001091 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001092 switch (attr->attr) {
1093 case KVM_S390_VM_MIGRATION_START:
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001094 res = kvm_s390_vm_start_migration(kvm);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001095 break;
1096 case KVM_S390_VM_MIGRATION_STOP:
1097 res = kvm_s390_vm_stop_migration(kvm);
1098 break;
1099 default:
1100 break;
1101 }
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001102 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001103
1104 return res;
1105}
1106
1107static int kvm_s390_vm_get_migration(struct kvm *kvm,
1108 struct kvm_device_attr *attr)
1109{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001110 u64 mig = kvm->arch.migration_mode;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001111
1112 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1113 return -ENXIO;
1114
1115 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1116 return -EFAULT;
1117 return 0;
1118}
1119
Collin L. Walling8fa16962016-07-26 15:29:44 -04001120static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1121{
1122 struct kvm_s390_vm_tod_clock gtod;
1123
1124 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
1125 return -EFAULT;
1126
David Hildenbrand0e7def52018-02-07 12:46:43 +01001127 if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001128 return -EINVAL;
David Hildenbrand0e7def52018-02-07 12:46:43 +01001129 kvm_s390_set_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001130
1131 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
1132 gtod.epoch_idx, gtod.tod);
1133
1134 return 0;
1135}
1136
Jason J. Herne72f25022014-11-25 09:46:02 -05001137static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1138{
1139 u8 gtod_high;
1140
1141 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
1142 sizeof(gtod_high)))
1143 return -EFAULT;
1144
1145 if (gtod_high != 0)
1146 return -EINVAL;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001147 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001148
1149 return 0;
1150}
1151
1152static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1153{
David Hildenbrand0e7def52018-02-07 12:46:43 +01001154 struct kvm_s390_vm_tod_clock gtod = { 0 };
Jason J. Herne72f25022014-11-25 09:46:02 -05001155
David Hildenbrand0e7def52018-02-07 12:46:43 +01001156 if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
1157 sizeof(gtod.tod)))
Jason J. Herne72f25022014-11-25 09:46:02 -05001158 return -EFAULT;
1159
David Hildenbrand0e7def52018-02-07 12:46:43 +01001160 kvm_s390_set_tod_clock(kvm, &gtod);
1161 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001162 return 0;
1163}
1164
1165static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1166{
1167 int ret;
1168
1169 if (attr->flags)
1170 return -EINVAL;
1171
1172 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001173 case KVM_S390_VM_TOD_EXT:
1174 ret = kvm_s390_set_tod_ext(kvm, attr);
1175 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001176 case KVM_S390_VM_TOD_HIGH:
1177 ret = kvm_s390_set_tod_high(kvm, attr);
1178 break;
1179 case KVM_S390_VM_TOD_LOW:
1180 ret = kvm_s390_set_tod_low(kvm, attr);
1181 break;
1182 default:
1183 ret = -ENXIO;
1184 break;
1185 }
1186 return ret;
1187}
1188
David Hildenbrand33d1b272018-04-27 14:36:13 +02001189static void kvm_s390_get_tod_clock(struct kvm *kvm,
1190 struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001191{
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001192 union tod_clock clk;
Collin L. Walling8fa16962016-07-26 15:29:44 -04001193
1194 preempt_disable();
1195
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001196 store_tod_clock_ext(&clk);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001197
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001198 gtod->tod = clk.tod + kvm->arch.epoch;
David Hildenbrand33d1b272018-04-27 14:36:13 +02001199 gtod->epoch_idx = 0;
1200 if (test_kvm_facility(kvm, 139)) {
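		/*
		 * Multiple-epoch facility: combine the machine's epoch index
		 * with the guest's epoch-index offset, carrying one extra if
		 * the 64-bit TOD addition above wrapped around.
		 */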
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001201 gtod->epoch_idx = clk.ei + kvm->arch.epdx;
1202 if (gtod->tod < clk.tod)
David Hildenbrand33d1b272018-04-27 14:36:13 +02001203 gtod->epoch_idx += 1;
1204 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04001205
1206 preempt_enable();
1207}
1208
1209static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1210{
1211 struct kvm_s390_vm_tod_clock gtod;
1212
1213 memset(&gtod, 0, sizeof(gtod));
David Hildenbrand33d1b272018-04-27 14:36:13 +02001214 kvm_s390_get_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001215 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1216 return -EFAULT;
1217
1218 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1219 gtod.epoch_idx, gtod.tod);
1220 return 0;
1221}
1222
Jason J. Herne72f25022014-11-25 09:46:02 -05001223static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1224{
1225 u8 gtod_high = 0;
1226
1227 if (copy_to_user((void __user *)attr->addr, &gtod_high,
1228 sizeof(gtod_high)))
1229 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001230 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001231
1232 return 0;
1233}
1234
1235static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1236{
David Hildenbrand5a3d8832015-09-29 16:27:24 +02001237 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -05001238
David Hildenbrand60417fc2015-09-29 16:20:36 +02001239 gtod = kvm_s390_get_tod_clock_fast(kvm);
Jason J. Herne72f25022014-11-25 09:46:02 -05001240 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1241 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001242 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001243
1244 return 0;
1245}
1246
1247static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1248{
1249 int ret;
1250
1251 if (attr->flags)
1252 return -EINVAL;
1253
1254 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001255 case KVM_S390_VM_TOD_EXT:
1256 ret = kvm_s390_get_tod_ext(kvm, attr);
1257 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001258 case KVM_S390_VM_TOD_HIGH:
1259 ret = kvm_s390_get_tod_high(kvm, attr);
1260 break;
1261 case KVM_S390_VM_TOD_LOW:
1262 ret = kvm_s390_get_tod_low(kvm, attr);
1263 break;
1264 default:
1265 ret = -ENXIO;
1266 break;
1267 }
1268 return ret;
1269}
1270
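/*
 * Set the guest CPU model. A caller-supplied IBC is clamped to the machine's
 * range: a request above the unblocked IBC is capped to it, and one below the
 * lowest IBC is raised to it (only when both the machine and the request
 * report a nonzero IBC).
 */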
Michael Mueller658b6ed2015-02-02 15:49:35 +01001271static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1272{
1273 struct kvm_s390_vm_cpu_processor *proc;
David Hildenbrand053dd232016-04-04 13:59:42 +02001274 u16 lowest_ibc, unblocked_ibc;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001275 int ret = 0;
1276
1277 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +02001278 if (kvm->created_vcpus) {
Michael Mueller658b6ed2015-02-02 15:49:35 +01001279 ret = -EBUSY;
1280 goto out;
1281 }
Christian Borntraegerc4196212020-11-06 08:34:23 +01001282 proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001283 if (!proc) {
1284 ret = -ENOMEM;
1285 goto out;
1286 }
1287 if (!copy_from_user(proc, (void __user *)attr->addr,
1288 sizeof(*proc))) {
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001289 kvm->arch.model.cpuid = proc->cpuid;
David Hildenbrand053dd232016-04-04 13:59:42 +02001290 lowest_ibc = sclp.ibc >> 16 & 0xfff;
1291 unblocked_ibc = sclp.ibc & 0xfff;
David Hildenbrand0487c442016-06-10 09:22:31 +02001292 if (lowest_ibc && proc->ibc) {
David Hildenbrand053dd232016-04-04 13:59:42 +02001293 if (proc->ibc > unblocked_ibc)
1294 kvm->arch.model.ibc = unblocked_ibc;
1295 else if (proc->ibc < lowest_ibc)
1296 kvm->arch.model.ibc = lowest_ibc;
1297 else
1298 kvm->arch.model.ibc = proc->ibc;
1299 }
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001300 memcpy(kvm->arch.model.fac_list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +01001301 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001302 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1303 kvm->arch.model.ibc,
1304 kvm->arch.model.cpuid);
1305 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1306 kvm->arch.model.fac_list[0],
1307 kvm->arch.model.fac_list[1],
1308 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001309 } else
1310 ret = -EFAULT;
1311 kfree(proc);
1312out:
1313 mutex_unlock(&kvm->lock);
1314 return ret;
1315}
1316
David Hildenbrand15c97052015-03-19 17:36:43 +01001317static int kvm_s390_set_processor_feat(struct kvm *kvm,
1318 struct kvm_device_attr *attr)
1319{
1320 struct kvm_s390_vm_cpu_feat data;
David Hildenbrand15c97052015-03-19 17:36:43 +01001321
1322 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1323 return -EFAULT;
1324 if (!bitmap_subset((unsigned long *) data.feat,
1325 kvm_s390_available_cpu_feat,
1326 KVM_S390_VM_CPU_FEAT_NR_BITS))
1327 return -EINVAL;
1328
1329 mutex_lock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001330 if (kvm->created_vcpus) {
1331 mutex_unlock(&kvm->lock);
1332 return -EBUSY;
David Hildenbrand15c97052015-03-19 17:36:43 +01001333 }
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001334 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1335 KVM_S390_VM_CPU_FEAT_NR_BITS);
David Hildenbrand15c97052015-03-19 17:36:43 +01001336 mutex_unlock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001337 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1338 data.feat[0],
1339 data.feat[1],
1340 data.feat[2]);
1341 return 0;
David Hildenbrand15c97052015-03-19 17:36:43 +01001342}
1343
David Hildenbrand0a763c72016-05-18 16:03:47 +02001344static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1345 struct kvm_device_attr *attr)
1346{
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001347 mutex_lock(&kvm->lock);
1348 if (kvm->created_vcpus) {
1349 mutex_unlock(&kvm->lock);
1350 return -EBUSY;
1351 }
1352
1353 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1354 sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1355 mutex_unlock(&kvm->lock);
1356 return -EFAULT;
1357 }
1358 mutex_unlock(&kvm->lock);
1359
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001360 VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1361 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1362 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1363 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1364 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1365 VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1366 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1367 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1368 VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1369 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1370 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1371 VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1372 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1373 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1374 VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
1375 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1376 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1377 VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1378 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1379 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1380 VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1381 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1382 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1383 VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1384 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1385 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1386 VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1387 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1388 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1389 VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1390 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1391 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1392 VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1393 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1394 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1395 VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1396 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1397 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1398 VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1399 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1400 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1401 VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1402 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1403 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001404 VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1405 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1406 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001407 VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1408 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1409 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1410 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1411 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001412 VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1413 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1414 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1415 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1416 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001417
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001418 return 0;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001419}
1420
Michael Mueller658b6ed2015-02-02 15:49:35 +01001421static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1422{
1423 int ret = -ENXIO;
1424
1425 switch (attr->attr) {
1426 case KVM_S390_VM_CPU_PROCESSOR:
1427 ret = kvm_s390_set_processor(kvm, attr);
1428 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001429 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1430 ret = kvm_s390_set_processor_feat(kvm, attr);
1431 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001432 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1433 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1434 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001435 }
1436 return ret;
1437}
1438
1439static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1440{
1441 struct kvm_s390_vm_cpu_processor *proc;
1442 int ret = 0;
1443
Christian Borntraegerc4196212020-11-06 08:34:23 +01001444 proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001445 if (!proc) {
1446 ret = -ENOMEM;
1447 goto out;
1448 }
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001449 proc->cpuid = kvm->arch.model.cpuid;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001450 proc->ibc = kvm->arch.model.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001451 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1452 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001453 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1454 kvm->arch.model.ibc,
1455 kvm->arch.model.cpuid);
1456 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1457 kvm->arch.model.fac_list[0],
1458 kvm->arch.model.fac_list[1],
1459 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001460 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1461 ret = -EFAULT;
1462 kfree(proc);
1463out:
1464 return ret;
1465}
1466
1467static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1468{
1469 struct kvm_s390_vm_cpu_machine *mach;
1470 int ret = 0;
1471
Christian Borntraegerc4196212020-11-06 08:34:23 +01001472 mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001473 if (!mach) {
1474 ret = -ENOMEM;
1475 goto out;
1476 }
1477 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001478 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001479 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001480 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001481 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001482 sizeof(S390_lowcore.stfle_fac_list));
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001483 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1484 kvm->arch.model.ibc,
1485 kvm->arch.model.cpuid);
1486 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1487 mach->fac_mask[0],
1488 mach->fac_mask[1],
1489 mach->fac_mask[2]);
1490 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1491 mach->fac_list[0],
1492 mach->fac_list[1],
1493 mach->fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001494 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1495 ret = -EFAULT;
1496 kfree(mach);
1497out:
1498 return ret;
1499}
1500
David Hildenbrand15c97052015-03-19 17:36:43 +01001501static int kvm_s390_get_processor_feat(struct kvm *kvm,
1502 struct kvm_device_attr *attr)
1503{
1504 struct kvm_s390_vm_cpu_feat data;
1505
1506 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1507 KVM_S390_VM_CPU_FEAT_NR_BITS);
1508 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1509 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001510 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1511 data.feat[0],
1512 data.feat[1],
1513 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001514 return 0;
1515}
1516
1517static int kvm_s390_get_machine_feat(struct kvm *kvm,
1518 struct kvm_device_attr *attr)
1519{
1520 struct kvm_s390_vm_cpu_feat data;
1521
1522 bitmap_copy((unsigned long *) data.feat,
1523 kvm_s390_available_cpu_feat,
1524 KVM_S390_VM_CPU_FEAT_NR_BITS);
1525 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1526 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001527 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1528 data.feat[0],
1529 data.feat[1],
1530 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001531 return 0;
1532}
1533
David Hildenbrand0a763c72016-05-18 16:03:47 +02001534static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1535 struct kvm_device_attr *attr)
1536{
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001537 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1538 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1539 return -EFAULT;
1540
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001541 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1542 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1543 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1544 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1545 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1546 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1547 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1548 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1549 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1550 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1551 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1552 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1553 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1554 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1555 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
1556 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1557 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1558 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1559 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1560 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1561 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1562 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1563 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1564 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1565 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1566 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1567 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1568 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1569 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1570 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1571 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1572 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1573 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1574 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1575 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1576 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1577 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1578 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1579 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1580 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1581 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1582 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1583 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1584 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001585 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1586 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1587 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001588 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1589 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1590 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1591 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1592 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001593 VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1594 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1595 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1596 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1597 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001598
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001599 return 0;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001600}
1601
1602static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1603 struct kvm_device_attr *attr)
1604{
1605 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1606 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1607 return -EFAULT;
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001608
1609 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1610 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1611 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1612 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1613 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1614 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
1615 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1616 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1617 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
1618 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1619 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1620 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
1621 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1622 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1623 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
1624 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1625 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1626 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
1627 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1628 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1629 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
1630 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1631 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1632 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
1633 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1634 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1635 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
1636 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1637 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1638 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
1639 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1640 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1641 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
1642 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1643 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1644 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
1645 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1646 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1647 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
1648 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1649 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1650 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
1651 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1652 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001653 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
1654 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1655 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001656 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1657 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1658 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1659 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1660 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001661 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1662 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1663 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1664 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1665 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001666
David Hildenbrand0a763c72016-05-18 16:03:47 +02001667 return 0;
1668}
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001669
Michael Mueller658b6ed2015-02-02 15:49:35 +01001670static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1671{
1672 int ret = -ENXIO;
1673
1674 switch (attr->attr) {
1675 case KVM_S390_VM_CPU_PROCESSOR:
1676 ret = kvm_s390_get_processor(kvm, attr);
1677 break;
1678 case KVM_S390_VM_CPU_MACHINE:
1679 ret = kvm_s390_get_machine(kvm, attr);
1680 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001681 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1682 ret = kvm_s390_get_processor_feat(kvm, attr);
1683 break;
1684 case KVM_S390_VM_CPU_MACHINE_FEAT:
1685 ret = kvm_s390_get_machine_feat(kvm, attr);
1686 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001687 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1688 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1689 break;
1690 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1691 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1692 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001693 }
1694 return ret;
1695}
1696
Dominik Dingelf2061652014-04-09 13:13:00 +02001697static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1698{
1699 int ret;
1700
1701 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001702 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001703 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001704 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001705 case KVM_S390_VM_TOD:
1706 ret = kvm_s390_set_tod(kvm, attr);
1707 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001708 case KVM_S390_VM_CPU_MODEL:
1709 ret = kvm_s390_set_cpu_model(kvm, attr);
1710 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001711 case KVM_S390_VM_CRYPTO:
1712 ret = kvm_s390_vm_set_crypto(kvm, attr);
1713 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001714 case KVM_S390_VM_MIGRATION:
1715 ret = kvm_s390_vm_set_migration(kvm, attr);
1716 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001717 default:
1718 ret = -ENXIO;
1719 break;
1720 }
1721
1722 return ret;
1723}
1724
1725static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1726{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001727 int ret;
1728
1729 switch (attr->group) {
1730 case KVM_S390_VM_MEM_CTRL:
1731 ret = kvm_s390_get_mem_control(kvm, attr);
1732 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001733 case KVM_S390_VM_TOD:
1734 ret = kvm_s390_get_tod(kvm, attr);
1735 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001736 case KVM_S390_VM_CPU_MODEL:
1737 ret = kvm_s390_get_cpu_model(kvm, attr);
1738 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001739 case KVM_S390_VM_MIGRATION:
1740 ret = kvm_s390_vm_get_migration(kvm, attr);
1741 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001742 default:
1743 ret = -ENXIO;
1744 break;
1745 }
1746
1747 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +02001748}
1749
1750static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1751{
1752 int ret;
1753
1754 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001755 case KVM_S390_VM_MEM_CTRL:
1756 switch (attr->attr) {
1757 case KVM_S390_VM_MEM_ENABLE_CMMA:
1758 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +01001759 ret = sclp.has_cmma ? 0 : -ENXIO;
1760 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001761 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001762 ret = 0;
1763 break;
1764 default:
1765 ret = -ENXIO;
1766 break;
1767 }
1768 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001769 case KVM_S390_VM_TOD:
1770 switch (attr->attr) {
1771 case KVM_S390_VM_TOD_LOW:
1772 case KVM_S390_VM_TOD_HIGH:
1773 ret = 0;
1774 break;
1775 default:
1776 ret = -ENXIO;
1777 break;
1778 }
1779 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001780 case KVM_S390_VM_CPU_MODEL:
1781 switch (attr->attr) {
1782 case KVM_S390_VM_CPU_PROCESSOR:
1783 case KVM_S390_VM_CPU_MACHINE:
David Hildenbrand15c97052015-03-19 17:36:43 +01001784 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1785 case KVM_S390_VM_CPU_MACHINE_FEAT:
David Hildenbrand0a763c72016-05-18 16:03:47 +02001786 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001787 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001788 ret = 0;
1789 break;
1790 default:
1791 ret = -ENXIO;
1792 break;
1793 }
1794 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001795 case KVM_S390_VM_CRYPTO:
1796 switch (attr->attr) {
1797 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1798 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1799 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1800 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1801 ret = 0;
1802 break;
Tony Krowiak37940fb2018-09-25 19:16:39 -04001803 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1804 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1805 ret = ap_instructions_available() ? 0 : -ENXIO;
1806 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001807 default:
1808 ret = -ENXIO;
1809 break;
1810 }
1811 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001812 case KVM_S390_VM_MIGRATION:
1813 ret = 0;
1814 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001815 default:
1816 ret = -ENXIO;
1817 break;
1818 }
1819
1820 return ret;
1821}
1822
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001823static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1824{
1825 uint8_t *keys;
1826 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001827 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001828
1829 if (args->flags != 0)
1830 return -EINVAL;
1831
1832 /* Is this guest using storage keys? */
Janosch Frank55531b72018-02-15 16:33:47 +01001833 if (!mm_uses_skeys(current->mm))
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001834 return KVM_S390_GET_SKEYS_NONE;
1835
1836 /* Enforce sane limit on memory allocation */
1837 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1838 return -EINVAL;
1839
Christian Borntraegerc4196212020-11-06 08:34:23 +01001840 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001841 if (!keys)
1842 return -ENOMEM;
1843
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001844 mmap_read_lock(current->mm);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001845 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001846 for (i = 0; i < args->count; i++) {
1847 hva = gfn_to_hva(kvm, args->start_gfn + i);
1848 if (kvm_is_error_hva(hva)) {
1849 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001850 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001851 }
1852
David Hildenbrand154c8c12016-05-09 11:22:34 +02001853 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1854 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001855 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001856 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001857 srcu_read_unlock(&kvm->srcu, srcu_idx);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001858 mmap_read_unlock(current->mm);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001859
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001860 if (!r) {
1861 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1862 sizeof(uint8_t) * args->count);
1863 if (r)
1864 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001865 }
1866
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001867 kvfree(keys);
1868 return r;
1869}
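
/*
 * A minimal userspace sketch of the matching KVM_S390_GET_SKEYS call
 * (vm_fd and the usual ioctl headers are assumed; the buffer must hold at
 * least count key bytes):
 *
 *	uint8_t keys[128];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = 128,
 *		.skeydata_addr = (uint64_t)(uintptr_t)keys,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 */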
1870
1871static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1872{
1873 uint8_t *keys;
1874 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001875 int srcu_idx, i, r = 0;
Janosch Frankbd096f62018-07-18 13:40:22 +01001876 bool unlocked;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001877
1878 if (args->flags != 0)
1879 return -EINVAL;
1880
1881 /* Enforce sane limit on memory allocation */
1882 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1883 return -EINVAL;
1884
Christian Borntraegerc4196212020-11-06 08:34:23 +01001885 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001886 if (!keys)
1887 return -ENOMEM;
1888
1889 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1890 sizeof(uint8_t) * args->count);
1891 if (r) {
1892 r = -EFAULT;
1893 goto out;
1894 }
1895
1896 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001897 r = s390_enable_skey();
1898 if (r)
1899 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001900
Janosch Frankbd096f62018-07-18 13:40:22 +01001901 i = 0;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001902 mmap_read_lock(current->mm);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001903 srcu_idx = srcu_read_lock(&kvm->srcu);
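	/*
	 * Try to set each key; if a fault is pending, resolve it with
	 * fixup_user_fault() before continuing or giving up.
	 */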
Janosch Frankbd096f62018-07-18 13:40:22 +01001904 while (i < args->count) {
1905 unlocked = false;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001906 hva = gfn_to_hva(kvm, args->start_gfn + i);
1907 if (kvm_is_error_hva(hva)) {
1908 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001909 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001910 }
1911
1912 /* Lowest order bit is reserved */
1913 if (keys[i] & 0x01) {
1914 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001915 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001916 }
1917
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001918 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Janosch Frankbd096f62018-07-18 13:40:22 +01001919 if (r) {
Peter Xu64019a22020-08-11 18:39:01 -07001920 r = fixup_user_fault(current->mm, hva,
Janosch Frankbd096f62018-07-18 13:40:22 +01001921 FAULT_FLAG_WRITE, &unlocked);
1922 if (r)
1923 break;
1924 }
1925 if (!r)
1926 i++;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001927 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001928 srcu_read_unlock(&kvm->srcu, srcu_idx);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001929 mmap_read_unlock(current->mm);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001930out:
1931 kvfree(keys);
1932 return r;
1933}
1934
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001935/*
 1936 * Base address and length must be sent at the start of each block; it is
 1937 * therefore cheaper to send some clean data, as long as it's less than the
 1938 * size of two longs.
1939 */
1940#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1941/* for consistency */
1942#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
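
/*
 * Worked example: with 8-byte longs KVM_S390_MAX_BIT_DISTANCE is 16, so runs
 * of up to 16 clean values (one byte each) are cheaper to transmit inline
 * than the two longs needed to start a new block.
 */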
1943
1944/*
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001945 * Similar to gfn_to_memslot, but also returns the index of a memslot when
 1946 * the address falls in a hole. In that case, the index of one of the
 1947 * memslots bordering the hole is returned.
1948 */
1949static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
1950{
1951 int start = 0, end = slots->used_slots;
1952 int slot = atomic_read(&slots->lru_slot);
1953 struct kvm_memory_slot *memslots = slots->memslots;
1954
1955 if (gfn >= memslots[slot].base_gfn &&
1956 gfn < memslots[slot].base_gfn + memslots[slot].npages)
1957 return slot;
1958
1959 while (start < end) {
1960 slot = start + (end - start) / 2;
1961
1962 if (gfn >= memslots[slot].base_gfn)
1963 end = slot;
1964 else
1965 start = slot + 1;
1966 }
1967
Sean Christopherson97daa022020-04-07 23:40:59 -07001968 if (start >= slots->used_slots)
1969 return slots->used_slots - 1;
1970
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001971 if (gfn >= memslots[start].base_gfn &&
1972 gfn < memslots[start].base_gfn + memslots[start].npages) {
1973 atomic_set(&slots->lru_slot, start);
1974 }
1975
1976 return start;
1977}
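
/*
 * Note: this binary search assumes the memslot array is kept sorted by
 * descending base_gfn, i.e. index 0 covers the highest guest frame numbers.
 */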
1978
1979static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1980 u8 *res, unsigned long bufsize)
1981{
1982 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
1983
1984 args->count = 0;
1985 while (args->count < bufsize) {
1986 hva = gfn_to_hva(kvm, cur_gfn);
1987 /*
1988 * We return an error if the first value was invalid, but we
1989 * return successfully if at least one value was copied.
1990 */
1991 if (kvm_is_error_hva(hva))
1992 return args->count ? 0 : -EFAULT;
1993 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1994 pgstev = 0;
1995 res[args->count++] = (pgstev >> 24) & 0x43;
1996 cur_gfn++;
1997 }
1998
1999 return 0;
2000}
2001
2002static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2003 unsigned long cur_gfn)
2004{
2005 int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
2006 struct kvm_memory_slot *ms = slots->memslots + slotidx;
2007 unsigned long ofs = cur_gfn - ms->base_gfn;
2008
2009 if (ms->base_gfn + ms->npages <= cur_gfn) {
2010 slotidx--;
2011 /* If we are above the highest slot, wrap around */
2012 if (slotidx < 0)
2013 slotidx = slots->used_slots - 1;
2014
2015 ms = slots->memslots + slotidx;
2016 ofs = 0;
2017 }
2018 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
2019 while ((slotidx > 0) && (ofs >= ms->npages)) {
2020 slotidx--;
2021 ms = slots->memslots + slotidx;
2022 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
2023 }
2024 return ms->base_gfn + ofs;
2025}
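
/*
 * The scan above walks the per-memslot "second" dirty bitmaps toward higher
 * guest frame numbers; if no dirty bit is found, the returned gfn lies one
 * past the end of the last slot searched.
 */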
2026
2027static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2028 u8 *res, unsigned long bufsize)
2029{
2030 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2031 struct kvm_memslots *slots = kvm_memslots(kvm);
2032 struct kvm_memory_slot *ms;
2033
Sean Christopherson0774a962020-03-20 13:55:40 -07002034 if (unlikely(!slots->used_slots))
2035 return 0;
2036
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002037 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2038 ms = gfn_to_memslot(kvm, cur_gfn);
2039 args->count = 0;
2040 args->start_gfn = cur_gfn;
2041 if (!ms)
2042 return 0;
2043 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2044 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
2045
2046 while (args->count < bufsize) {
2047 hva = gfn_to_hva(kvm, cur_gfn);
2048 if (kvm_is_error_hva(hva))
2049 return 0;
2050 /* Decrement only if we actually flipped the bit to 0 */
2051 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2052 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2053 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2054 pgstev = 0;
2055 /* Save the value */
2056 res[args->count++] = (pgstev >> 24) & 0x43;
2057 /* If the next bit is too far away, stop. */
2058 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2059 return 0;
2060 /* If we reached the previous "next", find the next one */
2061 if (cur_gfn == next_gfn)
2062 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2063 /* Reached the end of memory or of the buffer, stop */
2064 if ((next_gfn >= mem_end) ||
2065 (next_gfn - args->start_gfn >= bufsize))
2066 return 0;
2067 cur_gfn++;
2068 /* Reached the end of the current memslot, take the next one. */
2069 if (cur_gfn - ms->base_gfn >= ms->npages) {
2070 ms = gfn_to_memslot(kvm, cur_gfn);
2071 if (!ms)
2072 return 0;
2073 }
2074 }
2075 return 0;
2076}
2077
2078/*
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002079 * This function searches for the next page with dirty CMMA attributes, and
2080 * saves the attributes in the buffer up to either the end of the buffer or
2081 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2082 * no trailing clean bytes are saved.
2083 * In case no dirty bits were found, or if CMMA was not enabled or used, the
2084 * output buffer will indicate 0 as length.
2085 */
2086static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2087 struct kvm_s390_cmma_log *args)
2088{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002089 unsigned long bufsize;
2090 int srcu_idx, peek, ret;
2091 u8 *values;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002092
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002093 if (!kvm->arch.use_cmma)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002094 return -ENXIO;
2095 /* Invalid/unsupported flags were specified */
2096 if (args->flags & ~KVM_S390_CMMA_PEEK)
2097 return -EINVAL;
 2098	/* A plain query (no peek) is valid only while migration mode is on */
2099 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002100 if (!peek && !kvm->arch.migration_mode)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002101 return -EINVAL;
2102 /* CMMA is disabled or was not used, or the buffer has length zero */
2103 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002104 if (!bufsize || !kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002105 memset(args, 0, sizeof(*args));
2106 return 0;
2107 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002108 /* We are not peeking, and there are no dirty pages */
2109 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2110 memset(args, 0, sizeof(*args));
2111 return 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002112 }
2113
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002114 values = vmalloc(bufsize);
2115 if (!values)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002116 return -ENOMEM;
2117
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002118 mmap_read_lock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002119 srcu_idx = srcu_read_lock(&kvm->srcu);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002120 if (peek)
2121 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2122 else
2123 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002124 srcu_read_unlock(&kvm->srcu, srcu_idx);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002125 mmap_read_unlock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002126
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002127 if (kvm->arch.migration_mode)
2128 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2129 else
2130 args->remaining = 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002131
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002132 if (copy_to_user((void __user *)args->values, values, args->count))
2133 ret = -EFAULT;
2134
2135 vfree(values);
2136 return ret;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002137}
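
/*
 * A minimal userspace sketch of the matching KVM_S390_GET_CMMA_BITS call
 * (vm_fd and the usual ioctl headers are assumed; KVM_S390_CMMA_PEEK reads
 * the values without requiring migration mode):
 *
 *	uint8_t buf[4096];
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = sizeof(buf),
 *		.flags = KVM_S390_CMMA_PEEK,
 *		.values = (uint64_t)(uintptr_t)buf,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 */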
2138
2139/*
2140 * This function sets the CMMA attributes for the given pages. If the input
2141 * buffer has zero length, no action is taken, otherwise the attributes are
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002142 * set and the mm->context.uses_cmm flag is set.
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002143 */
2144static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2145 const struct kvm_s390_cmma_log *args)
2146{
2147 unsigned long hva, mask, pgstev, i;
2148 uint8_t *bits;
2149 int srcu_idx, r = 0;
2150
2151 mask = args->mask;
2152
2153 if (!kvm->arch.use_cmma)
2154 return -ENXIO;
2155 /* invalid/unsupported flags */
2156 if (args->flags != 0)
2157 return -EINVAL;
2158 /* Enforce sane limit on memory allocation */
2159 if (args->count > KVM_S390_CMMA_SIZE_MAX)
2160 return -EINVAL;
2161 /* Nothing to do */
2162 if (args->count == 0)
2163 return 0;
2164
Kees Cook42bc47b2018-06-12 14:27:11 -07002165 bits = vmalloc(array_size(sizeof(*bits), args->count));
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002166 if (!bits)
2167 return -ENOMEM;
2168
2169 r = copy_from_user(bits, (void __user *)args->values, args->count);
2170 if (r) {
2171 r = -EFAULT;
2172 goto out;
2173 }
2174
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002175 mmap_read_lock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002176 srcu_idx = srcu_read_lock(&kvm->srcu);
2177 for (i = 0; i < args->count; i++) {
2178 hva = gfn_to_hva(kvm, args->start_gfn + i);
2179 if (kvm_is_error_hva(hva)) {
2180 r = -EFAULT;
2181 break;
2182 }
2183
2184 pgstev = bits[i];
2185 pgstev = pgstev << 24;
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002186 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002187 set_pgste_bits(kvm->mm, hva, mask, pgstev);
2188 }
2189 srcu_read_unlock(&kvm->srcu, srcu_idx);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002190 mmap_read_unlock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002191
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002192 if (!kvm->mm->context.uses_cmm) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002193 mmap_write_lock(kvm->mm);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002194 kvm->mm->context.uses_cmm = 1;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002195 mmap_write_unlock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002196 }
2197out:
2198 vfree(bits);
2199 return r;
2200}
2201
Janosch Frank29b40f12019-09-30 04:19:18 -04002202static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
2203{
2204 struct kvm_vcpu *vcpu;
2205 u16 rc, rrc;
2206 int ret = 0;
2207 int i;
2208
2209 /*
2210 * We ignore failures and try to destroy as many CPUs as possible.
2211 * At the same time we must not free the assigned resources when
 2212	 * this fails, as the ultravisor still has access to that memory.
2213 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
2214 * behind.
2215 * We want to return the first failure rc and rrc, though.
2216 */
2217 kvm_for_each_vcpu(i, vcpu, kvm) {
2218 mutex_lock(&vcpu->mutex);
2219 if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
2220 *rcp = rc;
2221 *rrcp = rrc;
2222 ret = -EIO;
2223 }
2224 mutex_unlock(&vcpu->mutex);
2225 }
2226 return ret;
2227}
2228
2229static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2230{
2231 int i, r = 0;
2232 u16 dummy;
2233
2234 struct kvm_vcpu *vcpu;
2235
2236 kvm_for_each_vcpu(i, vcpu, kvm) {
2237 mutex_lock(&vcpu->mutex);
2238 r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2239 mutex_unlock(&vcpu->mutex);
2240 if (r)
2241 break;
2242 }
2243 if (r)
2244 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2245 return r;
2246}
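
/*
 * Unlike the destroy path above, conversion to protected mode is all or
 * nothing: if any vCPU fails to convert, every vCPU converted so far is
 * rolled back via kvm_s390_cpus_from_pv().
 */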
2247
2248static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2249{
2250 int r = 0;
2251 u16 dummy;
2252 void __user *argp = (void __user *)cmd->data;
2253
2254 switch (cmd->cmd) {
2255 case KVM_PV_ENABLE: {
2256 r = -EINVAL;
2257 if (kvm_s390_pv_is_protected(kvm))
2258 break;
2259
2260 /*
 2261		 * FMT 4 SIE needs esca. As we never switch back to bsca from
 2262		 * esca, we need no cleanup in the error cases below.
2263 */
2264 r = sca_switch_to_extended(kvm);
2265 if (r)
2266 break;
2267
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002268 mmap_write_lock(current->mm);
Janosch Frankfa0c5ea2019-07-16 13:08:37 +02002269 r = gmap_mark_unmergeable();
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002270 mmap_write_unlock(current->mm);
Janosch Frankfa0c5ea2019-07-16 13:08:37 +02002271 if (r)
2272 break;
2273
Janosch Frank29b40f12019-09-30 04:19:18 -04002274 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2275 if (r)
2276 break;
2277
2278 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2279 if (r)
2280 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
Christian Borntraeger0890dde2020-02-03 09:13:37 +01002281
2282 /* we need to block service interrupts from now on */
2283 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
Janosch Frank29b40f12019-09-30 04:19:18 -04002284 break;
2285 }
2286 case KVM_PV_DISABLE: {
2287 r = -EINVAL;
2288 if (!kvm_s390_pv_is_protected(kvm))
2289 break;
2290
2291 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2292 /*
2293 * If a CPU could not be destroyed, destroy VM will also fail.
2294 * There is no point in trying to destroy it. Instead return
 2295		 * the rc and rrc from the first CPU whose destruction failed.
2296 */
2297 if (r)
2298 break;
2299 r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
Christian Borntraeger0890dde2020-02-03 09:13:37 +01002300
2301 /* no need to block service interrupts any more */
2302 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
Janosch Frank29b40f12019-09-30 04:19:18 -04002303 break;
2304 }
2305 case KVM_PV_SET_SEC_PARMS: {
2306 struct kvm_s390_pv_sec_parm parms = {};
2307 void *hdr;
2308
2309 r = -EINVAL;
2310 if (!kvm_s390_pv_is_protected(kvm))
2311 break;
2312
2313 r = -EFAULT;
2314 if (copy_from_user(&parms, argp, sizeof(parms)))
2315 break;
2316
2317 /* Currently restricted to 8KB */
2318 r = -EINVAL;
2319 if (parms.length > PAGE_SIZE * 2)
2320 break;
2321
2322 r = -ENOMEM;
2323 hdr = vmalloc(parms.length);
2324 if (!hdr)
2325 break;
2326
2327 r = -EFAULT;
2328 if (!copy_from_user(hdr, (void __user *)parms.origin,
2329 parms.length))
2330 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2331 &cmd->rc, &cmd->rrc);
2332
2333 vfree(hdr);
2334 break;
2335 }
2336 case KVM_PV_UNPACK: {
2337 struct kvm_s390_pv_unp unp = {};
2338
2339 r = -EINVAL;
Janosch Frank1ed576a2020-10-20 06:12:07 -04002340 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
Janosch Frank29b40f12019-09-30 04:19:18 -04002341 break;
2342
2343 r = -EFAULT;
2344 if (copy_from_user(&unp, argp, sizeof(unp)))
2345 break;
2346
2347 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2348 &cmd->rc, &cmd->rrc);
2349 break;
2350 }
2351 case KVM_PV_VERIFY: {
2352 r = -EINVAL;
2353 if (!kvm_s390_pv_is_protected(kvm))
2354 break;
2355
2356 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2357 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2358 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2359 cmd->rrc);
2360 break;
2361 }
Janosch Franke0d27732019-05-09 13:07:21 +02002362 case KVM_PV_PREP_RESET: {
2363 r = -EINVAL;
2364 if (!kvm_s390_pv_is_protected(kvm))
2365 break;
2366
2367 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2368 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2369 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2370 cmd->rc, cmd->rrc);
2371 break;
2372 }
2373 case KVM_PV_UNSHARE_ALL: {
2374 r = -EINVAL;
2375 if (!kvm_s390_pv_is_protected(kvm))
2376 break;
2377
2378 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2379 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2380 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2381 cmd->rc, cmd->rrc);
2382 break;
2383 }
Janosch Frank29b40f12019-09-30 04:19:18 -04002384 default:
2385 r = -ENOTTY;
2386 }
2387 return r;
2388}
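
/*
 * A minimal userspace sketch of issuing one of the commands above (vm_fd and
 * the usual ioctl headers are assumed; rc and rrc carry the ultravisor
 * return codes on failure):
 *
 *	struct kvm_pv_cmd cmd = { .cmd = KVM_PV_ENABLE };
 *	if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd))
 *		fprintf(stderr, "rc 0x%x rrc 0x%x\n", cmd.rc, cmd.rrc);
 */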
2389
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002390long kvm_arch_vm_ioctl(struct file *filp,
2391 unsigned int ioctl, unsigned long arg)
2392{
2393 struct kvm *kvm = filp->private_data;
2394 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +02002395 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002396 int r;
2397
2398 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01002399 case KVM_S390_INTERRUPT: {
2400 struct kvm_s390_interrupt s390int;
2401
2402 r = -EFAULT;
2403 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2404 break;
2405 r = kvm_s390_inject_vm(kvm, &s390int);
2406 break;
2407 }
Cornelia Huck84223592013-07-15 13:36:01 +02002408 case KVM_CREATE_IRQCHIP: {
2409 struct kvm_irq_routing_entry routing;
2410
2411 r = -EINVAL;
2412 if (kvm->arch.use_irqchip) {
2413 /* Set up dummy routing. */
2414 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -04002415 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +02002416 }
2417 break;
2418 }
Dominik Dingelf2061652014-04-09 13:13:00 +02002419 case KVM_SET_DEVICE_ATTR: {
2420 r = -EFAULT;
2421 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2422 break;
2423 r = kvm_s390_vm_set_attr(kvm, &attr);
2424 break;
2425 }
2426 case KVM_GET_DEVICE_ATTR: {
2427 r = -EFAULT;
2428 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2429 break;
2430 r = kvm_s390_vm_get_attr(kvm, &attr);
2431 break;
2432 }
2433 case KVM_HAS_DEVICE_ATTR: {
2434 r = -EFAULT;
2435 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2436 break;
2437 r = kvm_s390_vm_has_attr(kvm, &attr);
2438 break;
2439 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -04002440 case KVM_S390_GET_SKEYS: {
2441 struct kvm_s390_skeys args;
2442
2443 r = -EFAULT;
2444 if (copy_from_user(&args, argp,
2445 sizeof(struct kvm_s390_skeys)))
2446 break;
2447 r = kvm_s390_get_skeys(kvm, &args);
2448 break;
2449 }
2450 case KVM_S390_SET_SKEYS: {
2451 struct kvm_s390_skeys args;
2452
2453 r = -EFAULT;
2454 if (copy_from_user(&args, argp,
2455 sizeof(struct kvm_s390_skeys)))
2456 break;
2457 r = kvm_s390_set_skeys(kvm, &args);
2458 break;
2459 }
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002460 case KVM_S390_GET_CMMA_BITS: {
2461 struct kvm_s390_cmma_log args;
2462
2463 r = -EFAULT;
2464 if (copy_from_user(&args, argp, sizeof(args)))
2465 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002466 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002467 r = kvm_s390_get_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002468 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002469 if (!r) {
2470 r = copy_to_user(argp, &args, sizeof(args));
2471 if (r)
2472 r = -EFAULT;
2473 }
2474 break;
2475 }
2476 case KVM_S390_SET_CMMA_BITS: {
2477 struct kvm_s390_cmma_log args;
2478
2479 r = -EFAULT;
2480 if (copy_from_user(&args, argp, sizeof(args)))
2481 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002482 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002483 r = kvm_s390_set_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002484 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002485 break;
2486 }
Janosch Frank29b40f12019-09-30 04:19:18 -04002487 case KVM_S390_PV_COMMAND: {
2488 struct kvm_pv_cmd args;
2489
Janosch Frankfe28c7862019-05-15 13:24:30 +02002490 /* protvirt means user sigp */
2491 kvm->arch.user_cpu_state_ctrl = 1;
Janosch Frank29b40f12019-09-30 04:19:18 -04002492 r = 0;
2493 if (!is_prot_virt_host()) {
2494 r = -EINVAL;
2495 break;
2496 }
2497 if (copy_from_user(&args, argp, sizeof(args))) {
2498 r = -EFAULT;
2499 break;
2500 }
2501 if (args.flags) {
2502 r = -EINVAL;
2503 break;
2504 }
2505 mutex_lock(&kvm->lock);
2506 r = kvm_s390_handle_pv(kvm, &args);
2507 mutex_unlock(&kvm->lock);
2508 if (copy_to_user(argp, &args, sizeof(args))) {
2509 r = -EFAULT;
2510 break;
2511 }
2512 break;
2513 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002514 default:
Avi Kivity367e1312009-08-26 14:57:07 +03002515 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002516 }
2517
2518 return r;
2519}
2520
Tony Krowiak45c9b472015-01-13 11:33:26 -05002521static int kvm_s390_apxa_installed(void)
2522{
Tony Krowiake585b242018-09-25 19:16:18 -04002523 struct ap_config_info info;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002524
Tony Krowiake585b242018-09-25 19:16:18 -04002525 if (ap_instructions_available()) {
2526 if (ap_qci(&info) == 0)
2527 return info.apxa;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002528 }
2529
2530 return 0;
2531}
2532
Tony Krowiake585b242018-09-25 19:16:18 -04002533/*
2534 * The format of the crypto control block (CRYCB) is specified in the 3 low
2535 * order bits of the CRYCB designation (CRYCBD) field as follows:
2536 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 2537 * AP extended addressing (APXA) facility is installed.
2538 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 2539 * Format 2: Both the APXA and MSAX3 facilities are installed.
2540 */
Tony Krowiak45c9b472015-01-13 11:33:26 -05002541static void kvm_s390_set_crycb_format(struct kvm *kvm)
2542{
2543 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2544
Tony Krowiake585b242018-09-25 19:16:18 -04002545 /* Clear the CRYCB format bits - i.e., set format 0 by default */
2546 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2547
2548 /* Check whether MSAX3 is installed */
2549 if (!test_kvm_facility(kvm, 76))
2550 return;
2551
Tony Krowiak45c9b472015-01-13 11:33:26 -05002552 if (kvm_s390_apxa_installed())
2553 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2554 else
2555 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2556}
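
/*
 * Illustrative sketch, not part of the original file: the same format
 * decision as kvm_s390_set_crycb_format(), written as a pure helper so the
 * mapping from the two facility bits to the CRYCBD format is explicit.
 * "crycb_origin", "has_msax3" and "has_apxa" are hypothetical inputs; kept
 * under #if 0 so it serves as documentation only.
 */
#if 0
static u32 example_crycbd(unsigned long crycb_origin, bool has_msax3,
			  bool has_apxa)
{
	u32 crycbd = (u32)crycb_origin & ~CRYCB_FORMAT_MASK; /* format 0 */

	if (has_msax3)
		crycbd |= has_apxa ? CRYCB_FORMAT2 : CRYCB_FORMAT1;
	return crycbd;
}
#endif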
2557
Pierre Morel0e237e42018-10-05 10:31:09 +02002558void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
2559 unsigned long *aqm, unsigned long *adm)
2560{
2561 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2562
2563 mutex_lock(&kvm->lock);
2564 kvm_s390_vcpu_block_all(kvm);
2565
2566 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2567	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
2568 memcpy(crycb->apcb1.apm, apm, 32);
2569 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
2570 apm[0], apm[1], apm[2], apm[3]);
2571 memcpy(crycb->apcb1.aqm, aqm, 32);
2572 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
2573 aqm[0], aqm[1], aqm[2], aqm[3]);
2574 memcpy(crycb->apcb1.adm, adm, 32);
2575 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
2576 adm[0], adm[1], adm[2], adm[3]);
2577 break;
2578 case CRYCB_FORMAT1:
2579	case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
2580 memcpy(crycb->apcb0.apm, apm, 8);
2581 memcpy(crycb->apcb0.aqm, aqm, 2);
2582 memcpy(crycb->apcb0.adm, adm, 2);
2583 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
2584 apm[0], *((unsigned short *)aqm),
2585 *((unsigned short *)adm));
2586 break;
2587	default:	/* Cannot happen */
2588 break;
2589 }
2590
2591 /* recreate the shadow crycb for each vcpu */
2592 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2593 kvm_s390_vcpu_unblock_all(kvm);
2594 mutex_unlock(&kvm->lock);
2595}
2596EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
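
/*
 * Hypothetical caller sketch (assumption: an in-kernel user such as the
 * vfio_ap driver): the masks are 256-bit bitmaps in inverted bit order.
 * This example would grant AP adapter 0 and usage domain 0 and no control
 * domains; kept under #if 0 as illustration only.
 */
#if 0
static void example_grant_apqn_zero(struct kvm *kvm)
{
	DECLARE_BITMAP(apm, 256) = { 0 };
	DECLARE_BITMAP(aqm, 256) = { 0 };
	DECLARE_BITMAP(adm, 256) = { 0 };

	set_bit_inv(0, apm);	/* adapter 0 */
	set_bit_inv(0, aqm);	/* usage domain 0 */
	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);
}
#endif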
2597
Tony Krowiak421045982018-09-25 19:16:25 -04002598void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2599{
2600 mutex_lock(&kvm->lock);
2601 kvm_s390_vcpu_block_all(kvm);
2602
2603 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2604 sizeof(kvm->arch.crypto.crycb->apcb0));
2605 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2606 sizeof(kvm->arch.crypto.crycb->apcb1));
2607
Pierre Morel0e237e42018-10-05 10:31:09 +02002608 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
Pierre Morel6cc571b2018-09-25 19:16:30 -04002609 /* recreate the shadow crycb for each vcpu */
2610 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
Tony Krowiak421045982018-09-25 19:16:25 -04002611 kvm_s390_vcpu_unblock_all(kvm);
2612 mutex_unlock(&kvm->lock);
2613}
2614EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2615
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002616static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002617{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002618 struct cpuid cpuid;
2619
2620 get_cpu_id(&cpuid);
2621 cpuid.version = 0xff;
2622 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002623}
2624
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002625static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002626{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002627 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002628 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002629
Tony Krowiake585b242018-09-25 19:16:18 -04002630 if (!test_kvm_facility(kvm, 76))
2631 return;
2632
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002633 /* Enable AES/DEA protected key functions by default */
2634 kvm->arch.crypto.aes_kw = 1;
2635 kvm->arch.crypto.dea_kw = 1;
2636 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2637 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2638 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2639 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002640}
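
/*
 * Userspace-side sketch (assumption about usage, not code from this file):
 * the AES/DEA key-wrapping defaults chosen above can later be toggled
 * through the KVM_S390_VM_CRYPTO attribute group on the VM file descriptor.
 */
#if 0
int example_disable_aes_wrapping(int vm_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CRYPTO,
		.attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
#endif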
2641
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002642static void sca_dispose(struct kvm *kvm)
2643{
2644 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002645 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002646 else
2647 free_page((unsigned long)(kvm->arch.sca));
2648 kvm->arch.sca = NULL;
2649}
2650
Carsten Ottee08b9632012-01-04 10:25:20 +01002651int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002652{
Christian Borntraegerc4196212020-11-06 08:34:23 +01002653 gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002654 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002655 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002656 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002657
Carsten Ottee08b9632012-01-04 10:25:20 +01002658 rc = -EINVAL;
2659#ifdef CONFIG_KVM_S390_UCONTROL
2660 if (type & ~KVM_VM_S390_UCONTROL)
2661 goto out_err;
2662 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2663 goto out_err;
2664#else
2665 if (type)
2666 goto out_err;
2667#endif
2668
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002669 rc = s390_enable_sie();
2670 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002671 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002672
Carsten Otteb2904112011-10-18 12:27:13 +02002673 rc = -ENOMEM;
2674
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002675 if (!sclp.has_64bscao)
2676 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002677 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002678 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002679 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002680 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002681 goto out_err;
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002682 mutex_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002683 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002684 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002685 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002686 kvm->arch.sca = (struct bsca_block *)
2687 ((char *) kvm->arch.sca + sca_offset);
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002688 mutex_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002689
2690 sprintf(debug_name, "kvm-%u", current->pid);
2691
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002692 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002693 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002694 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002695
Michael Mueller19114be2017-05-30 14:26:02 +02002696 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002697 kvm->arch.sie_page2 =
Christian Borntraegerc4196212020-11-06 08:34:23 +01002698 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002699 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002700 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002701
Michael Mueller25c84db2019-01-31 09:52:41 +01002702 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002703 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002704
2705 for (i = 0; i < kvm_s390_fac_size(); i++) {
2706 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2707 (kvm_s390_fac_base[i] |
2708 kvm_s390_fac_ext[i]);
2709 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2710 kvm_s390_fac_base[i];
2711 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002712 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002713
David Hildenbrand19352222017-08-29 16:31:08 +02002714	/* we are always in czam mode - even on pre-z14 machines */
2715 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2716 set_kvm_facility(kvm->arch.model.fac_list, 138);
2717 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002718 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2719 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002720 if (MACHINE_HAS_TLB_GUEST) {
2721 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2722 set_kvm_facility(kvm->arch.model.fac_list, 147);
2723 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002724
Pierre Morel05f31e32019-05-21 17:34:37 +02002725 if (css_general_characteristics.aiv && test_facility(65))
2726 set_kvm_facility(kvm->arch.model.fac_mask, 65);
2727
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002728 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002729 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002730
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002731 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002732
Fei Li51978392017-02-17 17:06:26 +08002733 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002734 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002735 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2736 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002737 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002738 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002739
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002740 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002741 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002742
Carsten Ottee08b9632012-01-04 10:25:20 +01002743 if (type & KVM_VM_S390_UCONTROL) {
2744 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002745 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002746 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002747 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002748 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002749 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002750 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002751 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002752 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002753 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002754 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002755 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002756 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002757 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002758
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002759 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002760 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002761 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002762 kvm_s390_vsie_init(kvm);
Michael Muellercc674ef2020-02-27 10:10:31 +01002763 if (use_gisa)
2764 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002765 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002766
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002767 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002768out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002769 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002770 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002771 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002772 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002773 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002774}
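
/*
 * Userspace-side sketch (assumption): the "type" argument to
 * kvm_arch_init_vm() arrives straight from the KVM_CREATE_VM ioctl, either
 * 0 for a regular guest or KVM_VM_S390_UCONTROL for a user-controlled VM.
 */
#if 0
int example_create_vm(int kvm_fd, bool ucontrol)
{
	return ioctl(kvm_fd, KVM_CREATE_VM,
		     ucontrol ? KVM_VM_S390_UCONTROL : 0UL);
}
#endif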
2775
Christian Borntraegerd329c032008-11-26 14:50:27 +01002776void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2777{
Janosch Frank29b40f12019-09-30 04:19:18 -04002778 u16 rc, rrc;
2779
Christian Borntraegerd329c032008-11-26 14:50:27 +01002780 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002781 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002782 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002783 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002784 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002785 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002786
2787 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002788 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002789
Dominik Dingele6db1d62015-05-07 15:41:57 +02002790 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002791 kvm_s390_vcpu_unsetup_cmma(vcpu);
Janosch Frank29b40f12019-09-30 04:19:18 -04002792	/* We cannot hold the vcpu mutex here; we are already dying */
2793 if (kvm_s390_pv_cpu_get_handle(vcpu))
2794 kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002795 free_page((unsigned long)(vcpu->arch.sie_block));
Christian Borntraegerd329c032008-11-26 14:50:27 +01002796}
2797
2798static void kvm_free_vcpus(struct kvm *kvm)
2799{
2800 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002801 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002802
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002803 kvm_for_each_vcpu(i, vcpu, kvm)
Sean Christopherson4543bdc2019-12-18 13:55:14 -08002804 kvm_vcpu_destroy(vcpu);
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002805
2806 mutex_lock(&kvm->lock);
2807 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2808 kvm->vcpus[i] = NULL;
2809
2810 atomic_set(&kvm->online_vcpus, 0);
2811 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002812}
2813
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002814void kvm_arch_destroy_vm(struct kvm *kvm)
2815{
Janosch Frank29b40f12019-09-30 04:19:18 -04002816 u16 rc, rrc;
2817
Christian Borntraegerd329c032008-11-26 14:50:27 +01002818 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002819 sca_dispose(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002820 kvm_s390_gisa_destroy(kvm);
Janosch Frank29b40f12019-09-30 04:19:18 -04002821 /*
2822 * We are already at the end of life and kvm->lock is not taken.
2823 * This is ok as the file descriptor is closed by now and nobody
2824 * can mess with the pv state. To avoid lockdep_assert_held from
2825 * complaining we do not use kvm_s390_pv_is_protected.
2826 */
2827 if (kvm_s390_pv_get_handle(kvm))
2828 kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
2829 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002830 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002831 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002832 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002833 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002834 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002835 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002836 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002837}
2838
2839/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002840static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2841{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002842 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002843 if (!vcpu->arch.gmap)
2844 return -ENOMEM;
2845 vcpu->arch.gmap->private = vcpu->kvm;
2846
2847 return 0;
2848}
2849
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002850static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2851{
David Hildenbranda6940672016-08-08 22:39:32 +02002852 if (!kvm_s390_use_sca_entries())
2853 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002854 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002855 if (vcpu->kvm->arch.use_esca) {
2856 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002857
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002858 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002859 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002860 } else {
2861 struct bsca_block *sca = vcpu->kvm->arch.sca;
2862
2863 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002864 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002865 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002866 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002867}
2868
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002869static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002870{
David Hildenbranda6940672016-08-08 22:39:32 +02002871 if (!kvm_s390_use_sca_entries()) {
2872 struct bsca_block *sca = vcpu->kvm->arch.sca;
2873
2874 /* we still need the basic sca for the ipte control */
2875 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2876 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002877 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002878 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002879 read_lock(&vcpu->kvm->arch.sca_lock);
2880 if (vcpu->kvm->arch.use_esca) {
2881 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002882
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002883 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002884 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2885 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002886 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002887 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002888 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002889 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002890
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002891 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002892 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2893 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002894 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002895 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002896 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002897}
2898
2899/* Basic SCA to Extended SCA data copy routines */
2900static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2901{
2902 d->sda = s->sda;
2903 d->sigp_ctrl.c = s->sigp_ctrl.c;
2904 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2905}
2906
2907static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2908{
2909 int i;
2910
2911 d->ipte_control = s->ipte_control;
2912 d->mcn[0] = s->mcn;
2913 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2914 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2915}
2916
2917static int sca_switch_to_extended(struct kvm *kvm)
2918{
2919 struct bsca_block *old_sca = kvm->arch.sca;
2920 struct esca_block *new_sca;
2921 struct kvm_vcpu *vcpu;
2922 unsigned int vcpu_idx;
2923 u32 scaol, scaoh;
2924
Janosch Frank29b40f12019-09-30 04:19:18 -04002925 if (kvm->arch.use_esca)
2926 return 0;
2927
Christian Borntraegerc4196212020-11-06 08:34:23 +01002928 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002929 if (!new_sca)
2930 return -ENOMEM;
2931
2932 scaoh = (u32)((u64)(new_sca) >> 32);
2933 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2934
2935 kvm_s390_vcpu_block_all(kvm);
2936 write_lock(&kvm->arch.sca_lock);
2937
2938 sca_copy_b_to_e(new_sca, old_sca);
2939
2940 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2941 vcpu->arch.sie_block->scaoh = scaoh;
2942 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002943 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002944 }
2945 kvm->arch.sca = new_sca;
2946 kvm->arch.use_esca = 1;
2947
2948 write_unlock(&kvm->arch.sca_lock);
2949 kvm_s390_vcpu_unblock_all(kvm);
2950
2951 free_page((unsigned long)old_sca);
2952
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002953 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2954 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002955 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002956}
2957
2958static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2959{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002960 int rc;
2961
David Hildenbranda6940672016-08-08 22:39:32 +02002962 if (!kvm_s390_use_sca_entries()) {
2963 if (id < KVM_MAX_VCPUS)
2964 return true;
2965 return false;
2966 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002967 if (id < KVM_S390_BSCA_CPU_SLOTS)
2968 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002969 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002970 return false;
2971
2972 mutex_lock(&kvm->lock);
2973 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2974 mutex_unlock(&kvm->lock);
2975
2976 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002977}
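
/*
 * For orientation (assuming the usual slot constants): the basic SCA holds
 * KVM_S390_BSCA_CPU_SLOTS (64) vcpus, the extended SCA
 * KVM_S390_ESCA_CPU_SLOTS (248); the switch above is one-way and is
 * triggered by the first vcpu id beyond the basic range.
 */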
2978
David Hildenbranddb0758b2016-02-15 09:42:25 +01002979/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2980static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2981{
2982 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002983 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002984 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002985 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002986}
2987
2988/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2989static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2990{
2991 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002992 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002993 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2994 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002995 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002996}
2997
2998/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2999static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3000{
3001 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3002 vcpu->arch.cputm_enabled = true;
3003 __start_cpu_timer_accounting(vcpu);
3004}
3005
3006/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3007static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3008{
3009 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3010 __stop_cpu_timer_accounting(vcpu);
3011 vcpu->arch.cputm_enabled = false;
3012}
3013
3014static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3015{
3016 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3017 __enable_cpu_timer_accounting(vcpu);
3018 preempt_enable();
3019}
3020
3021static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3022{
3023 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3024 __disable_cpu_timer_accounting(vcpu);
3025 preempt_enable();
3026}
3027
David Hildenbrand4287f242016-02-15 09:40:12 +01003028/* set the cpu timer - may only be called from the VCPU thread itself */
3029void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3030{
David Hildenbranddb0758b2016-02-15 09:42:25 +01003031 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01003032 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003033 if (vcpu->arch.cputm_enabled)
3034 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01003035 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003036 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003037 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01003038}
3039
David Hildenbranddb0758b2016-02-15 09:42:25 +01003040/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01003041__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3042{
David Hildenbrand9c23a132016-02-17 21:53:33 +01003043 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01003044 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01003045
3046 if (unlikely(!vcpu->arch.cputm_enabled))
3047 return vcpu->arch.sie_block->cputm;
3048
David Hildenbrand9c23a132016-02-17 21:53:33 +01003049 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3050 do {
3051 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3052 /*
3053 * If the writer would ever execute a read in the critical
3054 * section, e.g. in irq context, we have a deadlock.
3055 */
3056 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3057 value = vcpu->arch.sie_block->cputm;
3058 /* if cputm_start is 0, accounting is being started/stopped */
3059 if (likely(vcpu->arch.cputm_start))
3060 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3061 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3062 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003063 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01003064}
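
/*
 * Minimal sketch of the lockless read pattern used above, against a
 * hypothetical seqcount-protected value: the reader retries while a write
 * is in flight or the sequence changed underneath it. The kvm code
 * open-codes this with raw_read_seqcount() so that the vcpu thread may
 * read inside its own write-side critical section.
 */
#if 0
static u64 example_seqcount_read(seqcount_t *sc, const u64 *value)
{
	unsigned int seq;
	u64 v;

	do {
		seq = read_seqcount_begin(sc);
		v = *value;
	} while (read_seqcount_retry(sc, seq));
	return v;
}
#endif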
3065
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003066void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3067{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003068
David Hildenbrand37d9df92015-03-11 16:47:33 +01003069 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003070 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01003071 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003072 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01003073 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003074}
3075
3076void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3077{
David Hildenbrand01a745a2016-02-12 20:41:56 +01003078 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01003079 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003080 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003081 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01003082 vcpu->arch.enabled_gmap = gmap_get_enabled();
3083 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003084
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003085}
3086
Dominik Dingel31928aa2014-12-04 15:47:07 +01003087void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003088{
Jason J. Herne72f25022014-11-25 09:46:02 -05003089 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02003090 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003091 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01003092 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02003093 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003094 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02003095 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01003096 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02003097 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02003098 }
David Hildenbrand6502a342016-06-21 14:19:51 +02003099 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3100 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01003101 /* make vcpu_load load the right gmap on the first trigger */
3102 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003103}
3104
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003105static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3106{
3107 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3108 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3109 return true;
3110 return false;
3111}
3112
3113static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3114{
3115 /* At least one ECC subfunction must be present */
3116 return kvm_has_pckmo_subfunc(kvm, 32) ||
3117 kvm_has_pckmo_subfunc(kvm, 33) ||
3118 kvm_has_pckmo_subfunc(kvm, 34) ||
3119 kvm_has_pckmo_subfunc(kvm, 40) ||
3120 kvm_has_pckmo_subfunc(kvm, 41);
3121
3122}
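
/*
 * For orientation (assumption about the PCKMO function numbering): bits
 * 32-34 select the ECC-P256/P384/P521 key-wrapping functions and bits
 * 40-41 Ed25519/Ed448, so the helper above asks whether any ECC-capable
 * PCKMO subfunction is offered to the guest.
 */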
3123
Tony Krowiak5102ee82014-06-27 14:46:01 -04003124static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3125{
Tony Krowiake585b242018-09-25 19:16:18 -04003126 /*
3127 * If the AP instructions are not being interpreted and the MSAX3
3128 * facility is not configured for the guest, there is nothing to set up.
3129 */
3130 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04003131 return;
3132
Tony Krowiake585b242018-09-25 19:16:18 -04003133 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02003134 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04003135 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003136 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
Tony Krowiaka374e892014-09-03 10:13:53 +02003137
Tony Krowiake585b242018-09-25 19:16:18 -04003138 if (vcpu->kvm->arch.crypto.apie)
3139 vcpu->arch.sie_block->eca |= ECA_APIE;
3140
3141 /* Set up protected key support */
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003142 if (vcpu->kvm->arch.crypto.aes_kw) {
Tony Krowiaka374e892014-09-03 10:13:53 +02003143 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003144 /* ecc is also wrapped with AES key */
3145 if (kvm_has_pckmo_ecc(vcpu->kvm))
3146 vcpu->arch.sie_block->ecd |= ECD_ECC;
3147 }
3148
Tony Krowiaka374e892014-09-03 10:13:53 +02003149 if (vcpu->kvm->arch.crypto.dea_kw)
3150 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04003151}
3152
Dominik Dingelb31605c2014-03-25 13:47:11 +01003153void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3154{
3155 free_page(vcpu->arch.sie_block->cbrlo);
3156 vcpu->arch.sie_block->cbrlo = 0;
3157}
3158
3159int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3160{
Christian Borntraegerc4196212020-11-06 08:34:23 +01003161 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT);
Dominik Dingelb31605c2014-03-25 13:47:11 +01003162 if (!vcpu->arch.sie_block->cbrlo)
3163 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01003164 return 0;
3165}
3166
Michael Mueller91520f12015-02-27 14:32:11 +01003167static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3168{
3169 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3170
Michael Mueller91520f12015-02-27 14:32:11 +01003171 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01003172 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01003173 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01003174}
3175
Sean Christophersonff72bb52019-12-18 13:55:20 -08003176static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3177{
Dominik Dingelb31605c2014-03-25 13:47:11 +01003178 int rc = 0;
Janosch Frank29b40f12019-09-30 04:19:18 -04003179 u16 uvrc, uvrrc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003180
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01003181 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3182 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003183 CPUSTAT_STOPPED);
3184
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003185 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003186 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003187 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003188 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003189
Michael Mueller91520f12015-02-27 14:32:11 +01003190 kvm_s390_vcpu_setup_model(vcpu);
3191
David Hildenbrandbdab09f2016-04-12 11:07:49 +02003192 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3193 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003194 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01003195 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003196 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02003197 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003198 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003199
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003200 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003201 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02003202 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003203 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3204 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02003205 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003206 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02003207 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003208 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003209 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003210 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003211 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003212 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01003213 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003214 vcpu->arch.sie_block->eca |= ECA_VX;
3215 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04003216 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003217 if (test_kvm_facility(vcpu->kvm, 139))
3218 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003219 if (test_kvm_facility(vcpu->kvm, 156))
3220 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02003221 if (vcpu->arch.sie_block->gd) {
3222 vcpu->arch.sie_block->eca |= ECA_AIV;
3223 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3224 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3225 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003226 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3227 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08003228 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05003229
3230 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003231 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05003232 else
3233 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05003234
Dominik Dingele6db1d62015-05-07 15:41:57 +02003235 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01003236 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3237 if (rc)
3238 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003239 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01003240 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02003241 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01003242
Collin Walling67d49d52018-08-31 12:51:19 -04003243 vcpu->arch.sie_block->hpid = HPID_KVM;
3244
Tony Krowiak5102ee82014-06-27 14:46:01 -04003245 kvm_s390_vcpu_crypto_setup(vcpu);
3246
Janosch Frank29b40f12019-09-30 04:19:18 -04003247 mutex_lock(&vcpu->kvm->lock);
3248 if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3249 rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3250 if (rc)
3251 kvm_s390_vcpu_unsetup_cmma(vcpu);
3252 }
3253 mutex_unlock(&vcpu->kvm->lock);
3254
Dominik Dingelb31605c2014-03-25 13:47:11 +01003255 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003256}
3257
Sean Christopherson897cc382019-12-18 13:55:09 -08003258int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3259{
3260 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3261 return -EINVAL;
3262 return 0;
3263}
3264
Sean Christophersone529ef62019-12-18 13:55:15 -08003265int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003266{
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003267 struct sie_page *sie_page;
Sean Christopherson897cc382019-12-18 13:55:09 -08003268 int rc;
Carsten Otte4d475552011-10-18 12:27:12 +02003269
QingFeng Haoda72ca42017-06-07 11:41:19 +02003270 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Christian Borntraegerc4196212020-11-06 08:34:23 +01003271 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003272 if (!sie_page)
Sean Christophersone529ef62019-12-18 13:55:15 -08003273 return -ENOMEM;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003274
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003275 vcpu->arch.sie_block = &sie_page->sie_block;
3276 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3277
David Hildenbrandefed1102015-04-16 12:32:41 +02003278 /* the real guest size will always be smaller than msl */
3279 vcpu->arch.sie_block->mso = 0;
3280 vcpu->arch.sie_block->msl = sclp.hamax;
3281
Sean Christophersone529ef62019-12-18 13:55:15 -08003282 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003283 spin_lock_init(&vcpu->arch.local_int.lock);
Sean Christophersone529ef62019-12-18 13:55:15 -08003284 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02003285 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3286 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003287 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01003288
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003289 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3290 kvm_clear_async_pf_completion_queue(vcpu);
3291 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3292 KVM_SYNC_GPRS |
3293 KVM_SYNC_ACRS |
3294 KVM_SYNC_CRS |
3295 KVM_SYNC_ARCH0 |
Collin Walling23a60f82020-06-22 11:46:36 -04003296 KVM_SYNC_PFAULT |
3297 KVM_SYNC_DIAG318;
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003298 kvm_s390_set_prefix(vcpu, 0);
3299 if (test_kvm_facility(vcpu->kvm, 64))
3300 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3301 if (test_kvm_facility(vcpu->kvm, 82))
3302 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3303 if (test_kvm_facility(vcpu->kvm, 133))
3304 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3305 if (test_kvm_facility(vcpu->kvm, 156))
3306 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3307 /* fprs can be synchronized via vrs, even if the guest has no vx. With
3308 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3309 */
3310 if (MACHINE_HAS_VX)
3311 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3312 else
3313 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3314
3315 if (kvm_is_ucontrol(vcpu->kvm)) {
3316 rc = __kvm_ucontrol_vcpu_init(vcpu);
3317 if (rc)
Sean Christophersona2017f12019-12-18 13:55:11 -08003318 goto out_free_sie_block;
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003319 }
3320
Sean Christophersone529ef62019-12-18 13:55:15 -08003321 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3322 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3323 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003324
Sean Christophersonff72bb52019-12-18 13:55:20 -08003325 rc = kvm_s390_vcpu_setup(vcpu);
3326 if (rc)
3327 goto out_ucontrol_uninit;
Sean Christophersone529ef62019-12-18 13:55:15 -08003328 return 0;
3329
Sean Christophersonff72bb52019-12-18 13:55:20 -08003330out_ucontrol_uninit:
3331 if (kvm_is_ucontrol(vcpu->kvm))
3332 gmap_remove(vcpu->arch.gmap);
Wei Yongjun7b06bf22010-03-09 14:37:53 +08003333out_free_sie_block:
3334 free_page((unsigned long)(vcpu->arch.sie_block));
Sean Christophersone529ef62019-12-18 13:55:15 -08003335 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003336}
3337
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003338int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3339{
David Hildenbrand9a022062014-08-05 17:40:47 +02003340 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003341}
3342
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003343bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3344{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003345 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003346}
3347
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003348void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003349{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003350 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003351 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003352}
3353
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003354void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003355{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003356 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003357}
3358
Christian Borntraeger8e236542015-04-09 13:49:04 +02003359static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3360{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003361 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003362 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003363}
3364
David Hildenbrand9ea59722018-09-25 19:16:16 -04003365bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3366{
3367 return atomic_read(&vcpu->arch.sie_block->prog20) &
3368 (PROG_BLOCK_SIE | PROG_REQUEST);
3369}
3370
Christian Borntraeger8e236542015-04-09 13:49:04 +02003371static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3372{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003373 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003374}
3375
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003376/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003377 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003378 * If the CPU is not running (e.g. waiting as idle) the function will
3379 * return immediately. */
3380void exit_sie(struct kvm_vcpu *vcpu)
3381{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003382 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003383 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003384 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3385 cpu_relax();
3386}
3387
Christian Borntraeger8e236542015-04-09 13:49:04 +02003388/* Kick a guest cpu out of SIE to process a request synchronously */
3389void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003390{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003391 kvm_make_request(req, vcpu);
3392 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003393}
3394
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003395static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3396 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003397{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003398 struct kvm *kvm = gmap->private;
3399 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003400 unsigned long prefix;
3401 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003402
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02003403 if (gmap_is_shadow(gmap))
3404 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003405 if (start >= 1UL << 31)
3406 /* We are only interested in prefix pages */
3407 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003408 kvm_for_each_vcpu(i, vcpu, kvm) {
3409 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003410 prefix = kvm_s390_get_prefix(vcpu);
3411 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3412 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3413 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003414 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003415 }
3416 }
3417}
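
/*
 * Worked example with hypothetical numbers: with a 4 KiB PAGE_SIZE and a
 * vcpu prefix of 0x8000, the two prefix pages span 0x8000..0x9fff, so a
 * notifier range such as 0x9000..0x9fff overlaps and forces a
 * KVM_REQ_MMU_RELOAD for that vcpu.
 */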
3418
Christian Borntraeger8b905d22019-03-05 05:30:02 -05003419bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3420{
3421 /* do not poll with more than halt_poll_max_steal percent of steal time */
3422 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3423 halt_poll_max_steal) {
3424 vcpu->stat.halt_no_poll_steal++;
3425 return true;
3426 }
3427 return false;
3428}
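
/*
 * Worked example (assuming the usual TOD scale of 4096 clock units per
 * microsecond): TICK_USEC << 12 is one timer tick in TOD units, and
 * avg_steal_timer is the average steal per tick in the same units, so the
 * comparison above reads "steal percentage of a tick >=
 * halt_poll_max_steal" (10 by default).
 */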
3429
Christoffer Dallb6d33832012-03-08 16:44:24 -05003430int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3431{
3432 /* kvm common code refers to this, but never calls it */
3433 BUG();
3434 return 0;
3435}
3436
Carsten Otte14eebd92012-05-15 14:15:26 +02003437static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3438 struct kvm_one_reg *reg)
3439{
3440 int r = -EINVAL;
3441
3442 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003443 case KVM_REG_S390_TODPR:
3444 r = put_user(vcpu->arch.sie_block->todpr,
3445 (u32 __user *)reg->addr);
3446 break;
3447 case KVM_REG_S390_EPOCHDIFF:
3448 r = put_user(vcpu->arch.sie_block->epoch,
3449 (u64 __user *)reg->addr);
3450 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003451 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003452 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02003453 (u64 __user *)reg->addr);
3454 break;
3455 case KVM_REG_S390_CLOCK_COMP:
3456 r = put_user(vcpu->arch.sie_block->ckc,
3457 (u64 __user *)reg->addr);
3458 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003459 case KVM_REG_S390_PFTOKEN:
3460 r = put_user(vcpu->arch.pfault_token,
3461 (u64 __user *)reg->addr);
3462 break;
3463 case KVM_REG_S390_PFCOMPARE:
3464 r = put_user(vcpu->arch.pfault_compare,
3465 (u64 __user *)reg->addr);
3466 break;
3467 case KVM_REG_S390_PFSELECT:
3468 r = put_user(vcpu->arch.pfault_select,
3469 (u64 __user *)reg->addr);
3470 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003471 case KVM_REG_S390_PP:
3472 r = put_user(vcpu->arch.sie_block->pp,
3473 (u64 __user *)reg->addr);
3474 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003475 case KVM_REG_S390_GBEA:
3476 r = put_user(vcpu->arch.sie_block->gbea,
3477 (u64 __user *)reg->addr);
3478 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003479 default:
3480 break;
3481 }
3482
3483 return r;
3484}
3485
3486static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3487 struct kvm_one_reg *reg)
3488{
3489 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01003490 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02003491
3492 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003493 case KVM_REG_S390_TODPR:
3494 r = get_user(vcpu->arch.sie_block->todpr,
3495 (u32 __user *)reg->addr);
3496 break;
3497 case KVM_REG_S390_EPOCHDIFF:
3498 r = get_user(vcpu->arch.sie_block->epoch,
3499 (u64 __user *)reg->addr);
3500 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003501 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003502 r = get_user(val, (u64 __user *)reg->addr);
3503 if (!r)
3504 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02003505 break;
3506 case KVM_REG_S390_CLOCK_COMP:
3507 r = get_user(vcpu->arch.sie_block->ckc,
3508 (u64 __user *)reg->addr);
3509 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003510 case KVM_REG_S390_PFTOKEN:
3511 r = get_user(vcpu->arch.pfault_token,
3512 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003513 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3514 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02003515 break;
3516 case KVM_REG_S390_PFCOMPARE:
3517 r = get_user(vcpu->arch.pfault_compare,
3518 (u64 __user *)reg->addr);
3519 break;
3520 case KVM_REG_S390_PFSELECT:
3521 r = get_user(vcpu->arch.pfault_select,
3522 (u64 __user *)reg->addr);
3523 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003524 case KVM_REG_S390_PP:
3525 r = get_user(vcpu->arch.sie_block->pp,
3526 (u64 __user *)reg->addr);
3527 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003528 case KVM_REG_S390_GBEA:
3529 r = get_user(vcpu->arch.sie_block->gbea,
3530 (u64 __user *)reg->addr);
3531 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003532 default:
3533 break;
3534 }
3535
3536 return r;
3537}
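
/*
 * Userspace-side sketch (assumption): the accessors above sit behind the
 * generic KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls, here setting the
 * guest CPU timer.
 */
#if 0
int example_set_cpu_timer(int vcpu_fd, __u64 cputm)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_S390_CPU_TIMER,
		.addr = (__u64)(unsigned long)&cputm,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
#endif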
Christoffer Dallb6d33832012-03-08 16:44:24 -05003538
Janosch Frank7de3f142020-01-31 05:02:02 -05003539static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003540{
Janosch Frank7de3f142020-01-31 05:02:02 -05003541 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
3542 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3543 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
3544
3545 kvm_clear_async_pf_completion_queue(vcpu);
3546 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
3547 kvm_s390_vcpu_stop(vcpu);
3548 kvm_s390_clear_local_irqs(vcpu);
3549}
3550
3551static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3552{
3553 /* Initial reset is a superset of the normal reset */
3554 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
3555
Christian Borntraegere93fc7b2020-03-03 03:10:57 -05003556 /*
3557 * This equals initial cpu reset in pop, but we don't switch to ESA.
3558	 * We not only reset the internal data, but also ...
3559 */
Janosch Frank7de3f142020-01-31 05:02:02 -05003560 vcpu->arch.sie_block->gpsw.mask = 0;
3561 vcpu->arch.sie_block->gpsw.addr = 0;
3562 kvm_s390_set_prefix(vcpu, 0);
3563 kvm_s390_set_cpu_timer(vcpu, 0);
3564 vcpu->arch.sie_block->ckc = 0;
Janosch Frank7de3f142020-01-31 05:02:02 -05003565 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
3566 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
3567 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
Christian Borntraegere93fc7b2020-03-03 03:10:57 -05003568
3569 /* ... the data in sync regs */
3570 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
3571 vcpu->run->s.regs.ckc = 0;
3572 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
3573 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
3574 vcpu->run->psw_addr = 0;
3575 vcpu->run->psw_mask = 0;
3576 vcpu->run->s.regs.todpr = 0;
3577 vcpu->run->s.regs.cputm = 0;
3578 vcpu->run->s.regs.ckc = 0;
3579 vcpu->run->s.regs.pp = 0;
3580 vcpu->run->s.regs.gbea = 1;
Janosch Frank7de3f142020-01-31 05:02:02 -05003581 vcpu->run->s.regs.fpc = 0;
Janosch Frank0f303502020-02-10 04:27:47 -05003582 /*
3583 * Do not reset these registers in the protected case, as some of
3584	 * them are overlaid and they are not accessible in this case
3585 * anyway.
3586 */
3587 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
3588 vcpu->arch.sie_block->gbea = 1;
3589 vcpu->arch.sie_block->pp = 0;
3590 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3591 vcpu->arch.sie_block->todpr = 0;
3592 }
Janosch Frank7de3f142020-01-31 05:02:02 -05003593}
3594
3595static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
3596{
3597 struct kvm_sync_regs *regs = &vcpu->run->s.regs;
3598
3599 /* Clear reset is a superset of the initial reset */
3600 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3601
3602 memset(&regs->gprs, 0, sizeof(regs->gprs));
3603 memset(&regs->vrs, 0, sizeof(regs->vrs));
3604 memset(&regs->acrs, 0, sizeof(regs->acrs));
3605 memset(&regs->gscb, 0, sizeof(regs->gscb));
3606
3607 regs->etoken = 0;
3608 regs->etoken_extension = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003609}
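
/*
 * Userspace-side sketch (assumption): the three reset depths above are
 * reachable through argument-less vcpu ioctls, from shallowest to deepest.
 */
#if 0
int example_clear_reset(int vcpu_fd)
{
	/* KVM_S390_NORMAL_RESET and KVM_S390_INITIAL_RESET also exist */
	return ioctl(vcpu_fd, KVM_S390_CLEAR_RESET, 0);
}
#endif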
3610
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

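/*
 * Validate the floating point control word before accepting it. With the
 * vector facility the FPRs are the first halves of the vector registers,
 * so they are converted on the way in and out.
 */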
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int ret = 0;

	vcpu_load(vcpu);

	if (test_fp_ctl(fpu->fpc)) {
		ret = -EINVAL;
		goto out;
	}
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

out:
	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

	vcpu_put(vcpu);
	return 0;
}

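/* The initial PSW can only be set while the VCPU is stopped. */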
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

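/*
 * Enabling guest debugging sets CPUSTAT_P to enforce PER for the guest
 * and imports hardware breakpoints when KVM_GUESTDBG_USE_HW_BP is set;
 * disabling it reverts both.
 */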
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu_load(vcpu);

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
		rc = -EINVAL;
		goto out;
	}
	if (!sclp.has_gpere) {
		rc = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
	}

out:
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret;

	vcpu_load(vcpu);

	/* CHECK_STOP and LOAD are not supported yet */
	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				      KVM_MP_STATE_OPERATING;

	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	vcpu_load(vcpu);

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		rc = kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		rc = kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
			rc = -ENXIO;
			break;
		}
		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
		break;
	case KVM_MP_STATE_CHECK_STOP:
		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	vcpu_put(vcpu);
	return rc;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
}

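/*
 * Handle all pending VCPU requests before entering SIE. Requests that
 * only re-arm state restart the scan via the retry label, so on success
 * the function returns with no request pending.
 */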
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!kvm_request_pending(vcpu))
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMM virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMM virtualization if CMMA is available and
		 * CMM has been used.
		 */
		if ((vcpu->kvm->arch.use_cmma) &&
		    (vcpu->kvm->mm->context.uses_cmm))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}

	/* nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	/* we left the vsie handler, nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);

	return 0;
}

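/*
 * Set the guest TOD epoch (and the epoch index if the multiple-epoch
 * facility 139 is available) on all VCPUs while they are blocked, so
 * that every SIE block sees a consistent guest clock.
 */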
void kvm_s390_set_tod_clock(struct kvm *kvm,
			    const struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_vcpu *vcpu;
	union tod_clock clk;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();

	store_tod_clock_ext(&clk);

	kvm->arch.epoch = gtod->tod - clk.tod;
	kvm->arch.epdx = 0;
	if (test_kvm_facility(kvm, 139)) {
		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
		if (kvm->arch.epoch > gtod->tod)
			kvm->arch.epdx -= 1;
	}

	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		vcpu->arch.sie_block->epdx = kvm->arch.epdx;
	}

	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);

	return true;
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
	return true;
}

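/*
 * Set up an async pfault only if the guest has enabled the pfault
 * interface and is currently able to take the notification interrupt;
 * otherwise the fault has to be resolved synchronously.
 */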
static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return false;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return false;
	if (psw_extint_disabled(vcpu))
		return false;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return false;
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		return false;
	if (!vcpu->arch.gmap->pfault_enabled)
		return false;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return false;

	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
}

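/*
 * Prepare for a SIE entry: finish pfault housekeeping, deliver pending
 * interrupts, process VCPU requests and arm the guest debugging state.
 */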
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

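/*
 * Turn the SIE exit into an action: reinject machine checks, dispatch
 * intercepts, resolve guest page faults, or prepare an exit to
 * userspace.
 */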
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	struct mcck_volatile_info *mcck_info;
	struct sie_page *sie_page;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (exit_reason == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		vcpu->stat.pfault_sync++;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
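/*
 * The inner run loop: reenter SIE until a signal arrives, a debug exit
 * is pending, or pre/post run processing reports an error. For protected
 * guests the general purpose registers are staged through the sie_page
 * and interrupts are fenced on continuation intercepts.
 */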
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;
	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(sie_page->pv_grregs,
			       vcpu->run->s.regs.gprs,
			       sizeof(sie_page->pv_grregs));
		}
		if (test_cpu_flag(CIF_FPU))
			load_fpu_regs();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(vcpu->run->s.regs.gprs,
			       sie_page->pv_grregs,
			       sizeof(sie_page->pv_grregs));
			/*
			 * We're not allowed to inject interrupts on intercepts
			 * that leave the guest state in an "in-between" state
			 * where the next SIE entry will do a continuation.
			 * Fence interrupts in our "internal" PSW.
			 */
			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
			}
		}
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

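/*
 * Sync the format-2 (non-protected) state from kvm_run into the SIE
 * block, including lazy enablement of runtime instrumentation and
 * guarded storage once userspace provides valid control blocks.
 */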
static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
	/* SIE will load etoken directly from SDNX and therefore kvm_run */
}

static void sync_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	/* Sync fmt2 only data */
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
		sync_regs_fmt2(vcpu);
	} else {
		/*
		 * In several places we have to modify our internal view to
		 * not do things that are disallowed by the ultravisor. For
		 * example we must not inject interrupts after specific exits
		 * (e.g. 112 prefix page not secure). We do this by turning
		 * off the machine check, external and I/O interrupt bits
		 * of our PSW copy. To avoid getting validity intercepts, we
		 * only accept the condition code from userspace.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
						   PSW_MASK_CC;
	}

	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		if (!vcpu->arch.host_gscb)
			__ctl_clear_bit(2, 4);
		vcpu->arch.host_gscb = NULL;
		preempt_enable();
	}
	/* SIE will save etoken directly into SDNX and therefore kvm_run */
}

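/*
 * Counterpart of sync_regs: copy the register state back into kvm_run
 * and restore the host register context.
 */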
static void store_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
		store_regs_fmt2(vcpu);
}

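/*
 * The KVM_RUN ioctl: validate the sync reg fields, sync the register
 * state in, execute the inner run loop and store the state back for
 * userspace.
 */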
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	int rc;

	if (kvm_run->immediate_exit)
		return -EINTR;

	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
		return -EINVAL;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	/*
	 * no need to check the return value of vcpu_start as it can only have
	 * an error for protvirt, but protvirt means user cpu state
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	sync_regs(vcpu);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * them into the save area
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the operating state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
	/*
	 * The real PSW might have changed due to a RESTART interpreted by the
	 * ultravisor. We block all interrupts and let the next sie exit
	 * refresh our view.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}

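/*
 * Bring the VCPU into the STOPPED state. If exactly one started VCPU
 * remains afterwards, IBS is enabled on it to speed it up.
 */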
int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the stopped state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

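/*
 * Access the secure instruction data area of a protected VCPU; the
 * offset and size are checked against the SIDA size, including the
 * overflow case.
 */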
static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
				   struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	int r = 0;

	if (mop->flags || !mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset < mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
		return -E2BIG;

	switch (mop->op) {
	case KVM_S390_MEMOP_SIDA_READ:
		if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
				 mop->sida_offset), mop->size))
			r = -EFAULT;

		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
				   mop->sida_offset), uaddr, mop->size))
			r = -EFAULT;
		break;
	}
	return r;
}

static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r = 0;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	}

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

Janosch Frank19e12272019-04-02 09:21:06 +02004753static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu,
4754 struct kvm_s390_mem_op *mop)
4755{
4756 int r, srcu_idx;
4757
4758 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4759
4760 switch (mop->op) {
4761 case KVM_S390_MEMOP_LOGICAL_READ:
4762 case KVM_S390_MEMOP_LOGICAL_WRITE:
4763 r = kvm_s390_guest_mem_op(vcpu, mop);
4764 break;
4765 case KVM_S390_MEMOP_SIDA_READ:
4766 case KVM_S390_MEMOP_SIDA_WRITE:
4767 /* we are locked against sida going away by the vcpu->mutex */
4768 r = kvm_s390_guest_sida_op(vcpu, mop);
4769 break;
4770 default:
4771 r = -EINVAL;
4772 }
4773
4774 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
4775 return r;
4776}
4777
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;
	u16 rc, rrc;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
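	/*
	 * The three reset ioctls below form a hierarchy: a clear reset
	 * implies an initial reset, which in turn implies a normal reset.
	 * For protected guests, KVM cannot touch the secure cpu state
	 * directly, so the Ultravisor is asked to perform the matching
	 * reset via a UV call as well.
	 */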
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_INITIAL,
					  &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
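	/*
	 * The UCAS ioctls are only valid for user-controlled (ucontrol)
	 * VMs, where userspace manages the guest address space itself by
	 * mapping and unmapping gmap segments.
	 */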
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_memsida_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

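/*
 * For user-controlled VMs, userspace gets at the SIE control block by
 * mmap()ing the vcpu fd at KVM_S390_SIE_PAGE_OFFSET; this fault handler
 * then hands out the sie_block page. A minimal userspace sketch
 * (illustrative only, error handling omitted):
 *
 *	sie = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * 4096);
 *
 * All other mmap attempts on the vcpu fd receive SIGBUS.
 */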
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots must start and end on a
	 * segment boundary (1 MB). The userland memory backing a slot
	 * may be fragmented into any number of vmas, and it is fine to
	 * mmap() and munmap() within the slot at any time after this
	 * call.
	 */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	/* When we are protected, we should not change the memory slots */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		/* a move tears down the old mapping, then maps the new one */
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		fallthrough;
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
				      mem->guest_phys_addr, mem->memory_size);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

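/*
 * Mask of facility bits (of the i-th 64-bit facility word) that the
 * hypervisor may report to its guests. Each 2-bit field of sclp.hmfai
 * yields a value 0..3; the mask always withholds the 16 most
 * significant bits of the word, plus a further 16 bits per increment
 * of that value: a field value of 0 gives 0x0000ffffffffffffUL, a
 * field value of 3 gives 0.
 */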
static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");