// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

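/*
 * Descriptors for the statistics that KVM exports per VM and per vCPU:
 * the generic KVM counters plus the s390-specific interrupt injection,
 * exit and intercepted-instruction counters defined below.
 */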
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

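/*
 * The guest epoch is the signed offset that the SIE hardware adds to the
 * host TOD clock.  When the host TOD jumps by delta (e.g. after a clock
 * synchronization), adding -delta to the epoch keeps the guest time
 * unchanged; the carry/borrow is propagated into the epoch extension
 * (epdx) when the multiple-epoch facility is active.
 */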
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

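/*
 * Wire up the gmap invalidation notifiers (one for normal guests, one for
 * the shadow gmaps used by VSIE) and the epoch delta notifier that keeps
 * guest TOD epochs consistent across host clock adjustments.
 */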
int kvm_arch_hardware_setup(void *opaque)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

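/*
 * PERFORM LOCKED OPERATION treats function codes with bit 0x100 set as a
 * "test bit" query: condition code 0 reports that the function encoded in
 * the low bits is available, and the parameter registers are ignored.
 */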
static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}

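/*
 * Execute the query function (function code 0 in GR0) of the instruction
 * identified by the given opcode and store its availability mask into the
 * buffer addressed by GR1.  Used for instructions such as SORTL and
 * DFLTCC that are not covered by the CPACF query helpers.
 */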
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

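/*
 * Module initialization: set up the debug feature areas, probe the host
 * CPU features once, register the FLIC device ops and initialize the
 * GIB.  Any failure unwinds through kvm_arch_exit().
 */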
int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto out;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	default:
		r = 0;
	}
	return r;
}

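/*
 * Transfer the dirty bits that the gmap code tracked for this memslot
 * into the generic KVM dirty bitmap.  The slot is scanned one segment
 * (_PAGE_ENTRIES pages) at a time, with a reschedule point between
 * segments so that large slots do not hog the CPU.
 */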
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

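/*
 * Request that every vCPU intercepts operation exceptions, e.g. after
 * user space enabled KVM_CAP_S390_USER_INSTR0 to handle instruction 0
 * itself (see kvm_vm_ioctl_enable_cap() below).
 */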
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

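/*
 * Enabling AES/DEA key wrapping generates a fresh random wrapping key
 * mask in the CRYCB, disabling it clears the mask again; both require
 * facility 76 (MSA3).  Every change is propagated to all vCPUs via
 * kvm_s390_vcpu_crypto_reset_all() before the ioctl returns.
 */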
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	unsigned long cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int bkt;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || kvm_memslots_empty(slots))
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	kvm_for_each_memslot(ms, bkt, slots) {
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

David Hildenbrand33d1b272018-04-27 14:36:13 +02001190static void kvm_s390_get_tod_clock(struct kvm *kvm,
1191 struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001192{
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001193 union tod_clock clk;
Collin L. Walling8fa16962016-07-26 15:29:44 -04001194
1195 preempt_disable();
1196
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001197 store_tod_clock_ext(&clk);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001198
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001199 gtod->tod = clk.tod + kvm->arch.epoch;
David Hildenbrand33d1b272018-04-27 14:36:13 +02001200 gtod->epoch_idx = 0;
1201 if (test_kvm_facility(kvm, 139)) {
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001202 gtod->epoch_idx = clk.ei + kvm->arch.epdx;
1203 if (gtod->tod < clk.tod)
David Hildenbrand33d1b272018-04-27 14:36:13 +02001204 gtod->epoch_idx += 1;
1205 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04001206
1207 preempt_enable();
1208}
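
/*
 * The addition above is a two-word (epoch index, TOD) multi-precision sum:
 * when the 64-bit low-word addition clk.tod + kvm->arch.epoch wraps around
 * (detected by gtod->tod < clk.tod), the carry is propagated into the
 * epoch index. Negative guest epochs work out because kvm->arch.epdx is
 * effectively kept as the matching two's-complement high word.
 */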

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
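
/*
 * The IBC clamp above keeps the requested instruction blocking control
 * within what the machine supports: sclp.ibc carries the lowest supported
 * level in bits 16-27 and the highest unblocked level in bits 0-11.
 * Hypothetical example: with lowest_ibc = 0x100 and unblocked_ibc = 0x123,
 * a request of 0x200 is clamped down to 0x123, and a request of 0x042 is
 * raised to 0x100.
 */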

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}
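
/*
 * Each field copied in above holds the raw query-result bitmask of the
 * corresponding instruction (PLO, PTFF and the CPACF functions such as KM
 * or KIMD); a bit that is set advertises that subfunction to the guest.
 */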

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
	       sizeof(stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}
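
/*
 * Note the distinction above: fac_mask is the set of facilities KVM can
 * virtualize for this VM (the host facilities filtered by KVM's own
 * capabilities), while fac_list is the machine's raw STFLE facility list.
 * Userspace typically intersects both when computing a migration-safe
 * CPU model.
 */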

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
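
/*
 * Illustrative only: a minimal userspace sketch of reading storage keys,
 * assuming vm_fd is a valid VM file descriptor and omitting error handling:
 *
 *	uint8_t keys[128];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = 128,
 *		.skeydata_addr = (uint64_t)(uintptr_t)keys,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 */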

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);
out:
	kvfree(keys);
	return r;
}

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
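
/*
 * On 64-bit s390, KVM_S390_MAX_BIT_DISTANCE evaluates to 16: restarting a
 * block costs two 8-byte words (base address and length) of metadata, so up
 * to 16 clean bytes are cheaper to transmit inline than to skip.
 */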

static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}

static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
						     gfn_t gfn)
{
	return ____gfn_to_memslot(slots, gfn, true);
}

static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
	unsigned long ofs = cur_gfn - ms->base_gfn;
	struct rb_node *mnode = &ms->gfn_node[slots->node_idx];

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		mnode = rb_next(mnode);
		/* If we are above the highest slot, wrap around */
		if (!mnode)
			mnode = rb_first(&slots->gfn_tree);

		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
		ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
		ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
	}
	return ms->base_gfn + ofs;
}
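
/*
 * The walk above scans the per-memslot "second" dirty bitmaps in gfn order
 * via the memslot red-black tree: starting at the (approximate) slot for
 * cur_gfn, it looks for the next set bit and advances through the following
 * slots. If cur_gfn lies past the last slot, the search wraps to the first
 * one; if no dirty bit is found at all, the returned gfn points past the
 * end of the last slot scanned, which the caller checks against mem_end.
 */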

static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	if (unlikely(kvm_memslots_empty(slots)))
		return 0;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = kvm_s390_get_gfn_end(slots);

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}
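
/*
 * Illustrative only: a minimal userspace sketch of draining the dirty CMMA
 * log during migration, assuming vm_fd is valid, migration mode has been
 * started, and error handling is omitted:
 *
 *	uint8_t buf[4096];
 *	struct kvm_s390_cmma_log log = { .start_gfn = 0 };
 *
 *	do {
 *		log.count = sizeof(buf);
 *		log.flags = 0;
 *		log.values = (uint64_t)(uintptr_t)buf;
 *		ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 *		(transfer log.count values starting at log.start_gfn)
 *		log.start_gfn += log.count;
 *	} while (log.remaining);
 */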

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (!kvm->mm->context.uses_cmm) {
		mmap_write_lock(kvm->mm);
		kvm->mm->context.uses_cmm = 1;
		mmap_write_unlock(kvm->mm);
	}
out:
	vfree(bits);
	return r;
}

static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
{
	struct kvm_vcpu *vcpu;
	u16 rc, rrc;
	int ret = 0;
	unsigned long i;

	/*
	 * We ignore failures and try to destroy as many CPUs as possible.
	 * At the same time we must not free the assigned resources when
	 * this fails, as the ultravisor still has access to that memory.
	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
	 * behind.
	 * We want to return the first failure rc and rrc, though.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
			*rcp = rc;
			*rrcp = rrc;
			ret = -EIO;
		}
		mutex_unlock(&vcpu->mutex);
	}
	return ret;
}

static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	unsigned long i;
	int r = 0;
	u16 dummy;

	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
		mutex_unlock(&vcpu->mutex);
		if (r)
			break;
	}
	if (r)
		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
	return r;
}
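
/*
 * Conversion to protected mode is thus all-or-nothing at the vCPU level:
 * if any vCPU fails to convert, all previously converted vCPUs are rolled
 * back via kvm_s390_cpus_from_pv() before the error is reported.
 */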

static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
{
	int r = 0;
	u16 dummy;
	void __user *argp = (void __user *)cmd->data;

	switch (cmd->cmd) {
	case KVM_PV_ENABLE: {
		r = -EINVAL;
		if (kvm_s390_pv_is_protected(kvm))
			break;

		/*
		 * FMT 4 SIE needs esca. As we never switch back to bsca
		 * from esca, we need no cleanup in the error cases below.
		 */
		r = sca_switch_to_extended(kvm);
		if (r)
			break;

		mmap_write_lock(current->mm);
		r = gmap_mark_unmergeable();
		mmap_write_unlock(current->mm);
		if (r)
			break;

		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			break;

		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);

		/* we need to block service interrupts from now on */
		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_DISABLE: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
		/*
		 * If a CPU could not be destroyed, destroying the VM will
		 * also fail, so there is no point in trying. Instead,
		 * return the rc and rrc from the first CPU that failed
		 * destroying.
		 */
		if (r)
			break;
		r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);

		/* no need to block service interrupts any more */
		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_SET_SEC_PARMS: {
		struct kvm_s390_pv_sec_parm parms = {};
		void *hdr;

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = -EFAULT;
		if (copy_from_user(&parms, argp, sizeof(parms)))
			break;

		/* Currently restricted to 8KB */
		r = -EINVAL;
		if (parms.length > PAGE_SIZE * 2)
			break;

		r = -ENOMEM;
		hdr = vmalloc(parms.length);
		if (!hdr)
			break;

		r = -EFAULT;
		if (!copy_from_user(hdr, (void __user *)parms.origin,
				    parms.length))
			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
						      &cmd->rc, &cmd->rrc);

		vfree(hdr);
		break;
	}
	case KVM_PV_UNPACK: {
		struct kvm_s390_pv_unp unp = {};

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
			break;

		r = -EFAULT;
		if (copy_from_user(&unp, argp, sizeof(unp)))
			break;

		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
				       &cmd->rc, &cmd->rrc);
		break;
	}
	case KVM_PV_VERIFY: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
			     cmd->rrc);
		break;
	}
	case KVM_PV_PREP_RESET: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		break;
	}
	case KVM_PV_UNSHARE_ALL: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
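
/*
 * Illustrative only: the typical userspace sequence for bringing up a
 * protected (secure execution) guest, sketched without error handling
 * (each step checks cmd.rc and cmd.rrc in practice):
 *
 *	struct kvm_pv_cmd cmd = { .cmd = KVM_PV_ENABLE };
 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
 *	cmd.cmd = KVM_PV_SET_SEC_PARMS;	(cmd.data points at the SE header)
 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
 *	cmd.cmd = KVM_PV_UNPACK;	(cmd.data points at a kvm_s390_pv_unp)
 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
 *	cmd.cmd = KVM_PV_VERIFY;
 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
 */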

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	case KVM_S390_PV_COMMAND: {
		struct kvm_pv_cmd args;

		/* protvirt means user cpu state */
		kvm_s390_set_user_cpu_state_ctrl(kvm);
		r = 0;
		if (!is_prot_virt_host()) {
			r = -EINVAL;
			break;
		}
		if (copy_from_user(&args, argp, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		if (args.flags) {
			r = -EINVAL;
			break;
		}
		mutex_lock(&kvm->lock);
		r = kvm_s390_handle_pv(kvm, &args);
		mutex_unlock(&kvm->lock);
		if (copy_to_user(argp, &args, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}

/*
 * The format of the crypto control block (CRYCB) is specified in the 3 low
 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility are installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed.
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
		return;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
2529
/*
 * kvm_arch_crypto_set_masks
 *
 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
 *	 to be set.
 * @apm: the mask identifying the accessible AP adapters
 * @aqm: the mask identifying the accessible AP domains
 * @adm: the mask identifying the accessible AP control domains
 *
 * Set the masks that identify the adapters, domains and control domains to
 * which the KVM guest is granted access.
 *
 * Note: The kvm->lock mutex must be locked by the caller before invoking this
 *	 function.
 */
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default:	/* Cannot happen */
		break;
	}

	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);

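/*
 * Editor's illustrative sketch (assumptions labelled, not from the
 * source tree): how an AP pass-through driver could grant a guest
 * adapters 0-1 plus (control) domain 6. The masks are MSB-first, hence
 * the _inv bit helpers; kvm is a hypothetical guest pointer.
 *
 *	unsigned long apm[4] = { 0 }, aqm[4] = { 0 }, adm[4] = { 0 };
 *
 *	set_bit_inv(0, apm);	// adapter 0
 *	set_bit_inv(1, apm);	// adapter 1
 *	set_bit_inv(6, aqm);	// usage domain 6
 *	set_bit_inv(6, adm);	// control domain 6
 *
 *	mutex_lock(&kvm->lock);	// locking contract documented above
 *	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);
 *	mutex_unlock(&kvm->lock);
 */
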
/*
 * kvm_arch_crypto_clear_masks
 *
 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
 *	 to be cleared.
 *
 * Clear the masks that identify the adapters, domains and control domains to
 * which the KVM guest is granted access.
 *
 * Note: The kvm->lock mutex must be locked by the caller before invoking this
 *	 function.
 */
void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
	kvm_s390_vcpu_block_all(kvm);

	memset(&kvm->arch.crypto.crycb->apcb0, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb0));
	memset(&kvm->arch.crypto.crycb->apcb1, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb1));

	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);
	init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);

	if (!test_kvm_facility(kvm, 76))
		return;

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	/* start with basic SCA */
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	mutex_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	mutex_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	kvm->arch.sie_page2->kvm = kvm;
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;

	for (i = 0; i < kvm_s390_fac_size(); i++) {
		kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
					      (kvm_s390_fac_base[i] |
					       kvm_s390_fac_ext[i]);
		kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
					      kvm_s390_fac_base[i];
	}
	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;

	/* we are always in czam mode - even on pre z14 machines */
	set_kvm_facility(kvm->arch.model.fac_mask, 138);
	set_kvm_facility(kvm->arch.model.fac_list, 138);
	/* we emulate STHYI in kvm */
	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);
	if (MACHINE_HAS_TLB_GUEST) {
		set_kvm_facility(kvm->arch.model.fac_mask, 147);
		set_kvm_facility(kvm->arch.model.fac_list, 147);
	}

	if (css_general_characteristics.aiv && test_facility(65))
		set_kvm_facility(kvm->arch.model.fac_mask, 65);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	mutex_init(&kvm->arch.float_int.ais_lock);
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_SIZE_MAX;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.use_pfmfi = sclp.has_pfmfi;
	kvm->arch.use_skf = sclp.has_skey;
	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	if (use_gisa)
		kvm_s390_gisa_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

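/*
 * Editor's illustrative sketch (not part of the source tree): the
 * 'type' argument above arrives from userspace via KVM_CREATE_VM;
 * kvm_fd is a hypothetical /dev/kvm descriptor.
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 *	// fails with EINVAL unless CONFIG_KVM_S390_UCONTROL is set and
 *	// the caller has CAP_SYS_ADMIN; type 0 selects an ordinary VM
 *	// with a gmap and a memory limit derived from sclp.hamax
 */
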
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	u16 rc, rrc;

	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	/* We cannot hold the vcpu mutex here, we are already dying */
	if (kvm_s390_pv_cpu_get_handle(vcpu))
		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
	free_page((unsigned long)(vcpu->arch.sie_block));
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	u16 rc, rrc;

	kvm_destroy_vcpus(kvm);
	sca_dispose(kvm);
	kvm_s390_gisa_destroy(kvm);
	/*
	 * We are already at the end of life and kvm->lock is not taken.
	 * This is ok as the file descriptor is closed by now and nobody
	 * can mess with the pv state. To avoid lockdep_assert_held from
	 * complaining we do not use kvm_s390_pv_is_protected.
	 */
	if (kvm_s390_pv_get_handle(kvm))
		kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		return;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned long vcpu_idx;
	u32 scaol, scaoh;

	if (kvm->arch.use_esca)
		return 0;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

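/*
 * Editor's worked example (hypothetical address, not from the source):
 * the SIE block stores the SCA origin split into 32-bit halves, and the
 * SCA is 64-byte aligned, so the low 6 bits of scaol are zero. For an
 * ESCA allocated at 0x0000000112345680:
 *
 *	scaoh = (u32)(0x0000000112345680ULL >> 32);	// 0x00000001
 *	scaol = (u32)0x0000000112345680ULL & ~0x3fU;	// 0x12345680
 */
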
static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

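/*
 * Editor's sketch of the seqcount pattern used above (generic shape,
 * not additional kernel code; 'data' stands in for cputm/cputm_start):
 *
 *	// writer (vcpu thread)
 *	raw_write_seqcount_begin(&sc);
 *	data = ...;
 *	raw_write_seqcount_end(&sc);
 *
 *	// reader (any thread)
 *	do {
 *		seq = raw_read_seqcount(&sc);
 *		val = data;
 *	} while (read_seqcount_retry(&sc, seq & ~1));
 *
 * raw_read_seqcount() does not spin on an odd (in-flight) sequence;
 * masking the low bit instead forces a retry, so a reader that catches
 * an update in progress loops rather than deadlocking.
 */
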
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	gmap_enable(vcpu->arch.enabled_gmap);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
{
	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
		return true;
	return false;
}

static bool kvm_has_pckmo_ecc(struct kvm *kvm)
{
	/* At least one ECC subfunction must be present */
	return kvm_has_pckmo_subfunc(kvm, 32) ||
	       kvm_has_pckmo_subfunc(kvm, 33) ||
	       kvm_has_pckmo_subfunc(kvm, 34) ||
	       kvm_has_pckmo_subfunc(kvm, 40) ||
	       kvm_has_pckmo_subfunc(kvm, 41);
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	/*
	 * If the AP instructions are not being interpreted and the MSAX3
	 * facility is not configured for the guest, there is nothing to set up.
	 */
	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
	vcpu->arch.sie_block->eca &= ~ECA_APIE;
	vcpu->arch.sie_block->ecd &= ~ECD_ECC;

	if (vcpu->kvm->arch.crypto.apie)
		vcpu->arch.sie_block->eca |= ECA_APIE;

	/* Set up protected key support */
	if (vcpu->kvm->arch.crypto.aes_kw) {
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
		/* ecc is also wrapped with AES key */
		if (kvm_has_pckmo_ecc(vcpu->kvm))
			vcpu->arch.sie_block->ecd |= ECD_ECC;
	}

	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
}

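/*
 * Editor's illustrative sketch (not part of the source tree): the
 * aes_kw/dea_kw flags consumed above are toggled from userspace via VM
 * device attributes; vm_fd is hypothetical, the constants are UAPI.
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CRYPTO,
 *		.attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *	// the new setting reaches a vcpu's SIE block the next time
 *	// kvm_s390_vcpu_crypto_setup() runs for that vcpu
 */
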
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;
	u16 uvrc, uvrrc;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
	else if (test_kvm_facility(vcpu->kvm, 8))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.sie_block->ecb |= ECB_SPECI;

	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	if (test_kvm_facility(vcpu->kvm, 139))
		vcpu->arch.sie_block->ecd |= ECD_MEF;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
	if (vcpu->arch.sie_block->gd) {
		vcpu->arch.sie_block->eca |= ECA_AIV;
		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
	}
	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
					| SDNXC;
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;

	if (sclp.has_kss)
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	vcpu->arch.sie_block->hpid = HPID_KVM;

	kvm_s390_vcpu_crypto_setup(vcpu);

	mutex_lock(&vcpu->kvm->lock);
	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
		if (rc)
			kvm_s390_vcpu_unsetup_cmma(vcpu);
	}
	mutex_unlock(&vcpu->kvm->lock);

	return rc;
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		return -EINVAL;
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct sie_page *sie_page;
	int rc;

	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!sie_page)
		return -ENOMEM;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT |
				    KVM_SYNC_DIAG318;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 82))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm)) {
		rc = __kvm_ucontrol_vcpu_init(vcpu);
		if (rc)
			goto out_free_sie_block;
	}

	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);

	rc = kvm_s390_vcpu_setup(vcpu);
	if (rc)
		goto out_ucontrol_uninit;
	return 0;

out_ucontrol_uninit:
	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
	return rc;
}

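/*
 * Editor's illustrative sketch (not part of the source tree): the
 * create/setup path above is triggered per vcpu id from userspace;
 * vm_fd/kvm_fd are hypothetical descriptors.
 *
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);	// vcpu id 0
 *	long sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	// run->kvm_valid_regs now advertises the sync-reg set chosen above
 */
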
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->prog20) &
	       (PROG_BLOCK_SIE | PROG_REQUEST);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	kvm_s390_vsie_kick(vcpu);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	unsigned long i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	/* do not poll with more than halt_poll_max_steal percent of steal time */
	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
	    READ_ONCE(halt_poll_max_steal)) {
		vcpu->stat.halt_no_poll_steal++;
		return true;
	}
	return false;
}

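/*
 * Editor's worked example for the check above (illustrative numbers):
 * avg_steal_timer is in TOD-clock units and 4096 TOD units equal one
 * microsecond, so TICK_USEC << 12 is one timer tick in TOD units.
 * Assuming HZ=100 (TICK_USEC = 10000) and the default
 * halt_poll_max_steal of 10 percent:
 *
 *	avg_steal_timer * 100 / (10000 << 12) >= 10
 *
 * first holds at avg_steal_timer == 4096000, i.e. about 1 ms of steal
 * time per 10 ms tick - polling stops once 10% of the tick is stolen.
 */
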
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

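/*
 * Editor's illustrative sketch (not part of the source tree): reading
 * the CPU timer through the ONE_REG interface handled above; vcpu_fd
 * is hypothetical, the id and struct come from the UAPI headers.
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
 *		printf("cpu timer: %llu\n", cputm);
 */
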
static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));

	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	/* Initial reset is a superset of the normal reset */
	kvm_arch_vcpu_ioctl_normal_reset(vcpu);

	/*
	 * This equals initial cpu reset in the PoP, but we don't switch
	 * to ESA. We do not only reset the internal data, but also ...
	 */
	vcpu->arch.sie_block->gpsw.mask = 0;
	vcpu->arch.sie_block->gpsw.addr = 0;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0;
	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;

	/* ... the data in sync regs */
	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
	vcpu->run->s.regs.ckc = 0;
	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
	vcpu->run->psw_addr = 0;
	vcpu->run->psw_mask = 0;
	vcpu->run->s.regs.todpr = 0;
	vcpu->run->s.regs.cputm = 0;
	vcpu->run->s.regs.ckc = 0;
	vcpu->run->s.regs.pp = 0;
	vcpu->run->s.regs.gbea = 1;
	vcpu->run->s.regs.fpc = 0;
	/*
	 * Do not reset these registers in the protected case, as some of
	 * them are overlaid and they are not accessible in this case
	 * anyway.
	 */
	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->gbea = 1;
		vcpu->arch.sie_block->pp = 0;
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->todpr = 0;
	}
}

static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Clear reset is a superset of the initial reset */
	kvm_arch_vcpu_ioctl_initial_reset(vcpu);

	memset(&regs->gprs, 0, sizeof(regs->gprs));
	memset(&regs->vrs, 0, sizeof(regs->vrs));
	memset(&regs->acrs, 0, sizeof(regs->acrs));
	memset(&regs->gscb, 0, sizeof(regs->gscb));

	regs->etoken = 0;
	regs->etoken_extension = 0;
}

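/*
 * Editor's illustrative sketch (not part of the source tree): the three
 * reset flavours above map onto vcpu ioctls, each a superset of the
 * previous one; vcpu_fd is hypothetical.
 *
 *	ioctl(vcpu_fd, KVM_S390_NORMAL_RESET, 0);	// irqs, pfault, riccb
 *	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);	// + psw, prefix, crs, timers
 *	ioctl(vcpu_fd, KVM_S390_CLEAR_RESET, 0);	// + gprs, vrs, acrs, gscb
 */
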
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int ret = 0;

	vcpu_load(vcpu);

	if (test_fp_ctl(fpu->fpc)) {
		ret = -EINVAL;
		goto out;
	}
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

out:
	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

	vcpu_put(vcpu);
	return 0;
}

3673static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3674{
3675 int rc = 0;
3676
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003677 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003678 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003679 else {
3680 vcpu->run->psw_mask = psw.mask;
3681 vcpu->run->psw_addr = psw.addr;
3682 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003683 return rc;
3684}
3685
3686int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3687 struct kvm_translation *tr)
3688{
3689 return -EINVAL; /* not implemented yet */
3690}
3691
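/*
 * Guest debugging support: single-stepping and hardware breakpoints
 * are built on the PER (program-event recording) facility; setting
 * CPUSTAT_P below forces PER interpretation while debugging is on.
 */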
David Hildenbrand27291e22014-01-23 12:26:52 +01003692#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3693 KVM_GUESTDBG_USE_HW_BP | \
3694 KVM_GUESTDBG_ENABLE)
3695
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003696int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3697 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003698{
David Hildenbrand27291e22014-01-23 12:26:52 +01003699 int rc = 0;
3700
Christoffer Dall66b56562017-12-04 21:35:33 +01003701 vcpu_load(vcpu);
3702
David Hildenbrand27291e22014-01-23 12:26:52 +01003703 vcpu->guest_debug = 0;
3704 kvm_s390_clear_bp_data(vcpu);
3705
Christoffer Dall66b56562017-12-04 21:35:33 +01003706 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3707 rc = -EINVAL;
3708 goto out;
3709 }
3710 if (!sclp.has_gpere) {
3711 rc = -EINVAL;
3712 goto out;
3713 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003714
3715 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3716 vcpu->guest_debug = dbg->control;
3717 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003718 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003719
3720 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3721 rc = kvm_s390_import_bp_data(vcpu, dbg);
3722 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003723 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003724 vcpu->arch.guestdbg.last_bp = 0;
3725 }
3726
3727 if (rc) {
3728 vcpu->guest_debug = 0;
3729 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003730 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003731 }
3732
Christoffer Dall66b56562017-12-04 21:35:33 +01003733out:
3734 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003735 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003736}
3737
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003738int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3739 struct kvm_mp_state *mp_state)
3740{
Christoffer Dallfd232562017-12-04 21:35:30 +01003741 int ret;
3742
3743 vcpu_load(vcpu);
3744
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003745 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003746 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3747 KVM_MP_STATE_OPERATING;
3748
3749 vcpu_put(vcpu);
3750 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003751}
3752
3753int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3754 struct kvm_mp_state *mp_state)
3755{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003756 int rc = 0;
3757
Christoffer Dalle83dff52017-12-04 21:35:31 +01003758 vcpu_load(vcpu);
3759
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003760 /* user space knows about this interface - let it control the state */
Eric Farman67cf68b2021-10-08 22:31:12 +02003761 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003762
3763 switch (mp_state->mp_state) {
3764 case KVM_MP_STATE_STOPPED:
Janosch Frankfe28c7862019-05-15 13:24:30 +02003765 rc = kvm_s390_vcpu_stop(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003766 break;
3767 case KVM_MP_STATE_OPERATING:
Janosch Frankfe28c7862019-05-15 13:24:30 +02003768 rc = kvm_s390_vcpu_start(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003769 break;
3770 case KVM_MP_STATE_LOAD:
Janosch Frank7c36a3f2019-09-02 08:34:44 +02003771 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
3772 rc = -ENXIO;
3773 break;
3774 }
3775 rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
3776 break;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003777 case KVM_MP_STATE_CHECK_STOP:
Joe Perches3b684a42020-03-10 21:51:32 -07003778 fallthrough; /* CHECK_STOP and LOAD are not supported yet */
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003779 default:
3780 rc = -ENXIO;
3781 }
3782
Christoffer Dalle83dff52017-12-04 21:35:31 +01003783 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003784 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003785}
3786
David Hildenbrand8ad35752014-03-14 11:00:21 +01003787static bool ibs_enabled(struct kvm_vcpu *vcpu)
3788{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003789 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003790}
3791
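/*
 * Handle all requests raised for this VCPU while it was outside SIE.
 * Each handled request restarts the scan (goto retry), so requests
 * raised concurrently are not lost before reentering the guest.
 */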
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003792static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3793{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003794retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003795 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003796 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003797 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003798 /*
3799 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003800 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003801 * This ensures that the ipte instruction for this request has
3802 * already finished. We might race against a second unmapper that
3803	 * wants to set the blocking bit. Let's just retry the request loop.
3804 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003805 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003806 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003807 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3808 kvm_s390_get_prefix(vcpu),
3809 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003810 if (rc) {
3811 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003812 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003813 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003814 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003815 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003816
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003817 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3818 vcpu->arch.sie_block->ihcpu = 0xffff;
3819 goto retry;
3820 }
3821
David Hildenbrand8ad35752014-03-14 11:00:21 +01003822 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3823 if (!ibs_enabled(vcpu)) {
3824 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003825 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003826 }
3827 goto retry;
3828 }
3829
3830 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3831 if (ibs_enabled(vcpu)) {
3832 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003833 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003834 }
3835 goto retry;
3836 }
3837
David Hildenbrand6502a342016-06-21 14:19:51 +02003838 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3839 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3840 goto retry;
3841 }
3842
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003843 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3844 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003845 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003846 * instruction manually, in order to provide additional
3847	 * functionality needed for live migration.
3848 */
3849 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3850 goto retry;
3851 }
3852
3853 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3854 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003855 * Re-enable CMM virtualization if CMMA is available and
3856 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003857 */
3858 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003859 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003860 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3861 goto retry;
3862 }
3863
David Hildenbrand0759d062014-05-13 16:54:32 +02003864 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003865 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003866 /* we left the vsie handler, nothing to do, just clear the request */
3867 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003868
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003869 return 0;
3870}
3871
David Hildenbrand0e7def52018-02-07 12:46:43 +01003872void kvm_s390_set_tod_clock(struct kvm *kvm,
3873 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003874{
3875 struct kvm_vcpu *vcpu;
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003876 union tod_clock clk;
Marc Zyngier46808a42021-11-16 16:04:02 +00003877 unsigned long i;
Collin L. Walling8fa16962016-07-26 15:29:44 -04003878
3879 mutex_lock(&kvm->lock);
3880 preempt_disable();
3881
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003882 store_tod_clock_ext(&clk);
Collin L. Walling8fa16962016-07-26 15:29:44 -04003883
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003884 kvm->arch.epoch = gtod->tod - clk.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003885 kvm->arch.epdx = 0;
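	/*
	 * The guest TOD is effectively a multi-word value (epoch index
	 * plus 64-bit epoch); the subtraction above was done modulo
	 * 2^64, so if it wrapped (requested TOD below the current host
	 * TOD) a borrow has to be taken from the epoch index below.
	 * Example: gtod->tod = 1, clk.tod = 2 gives epoch = 2^64 - 1
	 * and epdx is decremented by one.
	 */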
3886 if (test_kvm_facility(kvm, 139)) {
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003887 kvm->arch.epdx = gtod->epoch_idx - clk.ei;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003888 if (kvm->arch.epoch > gtod->tod)
3889 kvm->arch.epdx -= 1;
3890 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003891
3892 kvm_s390_vcpu_block_all(kvm);
3893 kvm_for_each_vcpu(i, vcpu, kvm) {
3894 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3895 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3896 }
3897
3898 kvm_s390_vcpu_unblock_all(kvm);
3899 preempt_enable();
3900 mutex_unlock(&kvm->lock);
3901}
3902
Thomas Huthfa576c52014-05-06 17:20:16 +02003903/**
3904 * kvm_arch_fault_in_page - fault-in guest page if necessary
3905 * @vcpu: The corresponding virtual cpu
3906 * @gpa: Guest physical address
3907 * @writable: Whether the page should be writable or not
3908 *
3909 * Make sure that a guest page has been faulted-in on the host.
3910 *
3911 * Return: Zero on success, negative error code otherwise.
3912 */
3913long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003914{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003915 return gmap_fault(vcpu->arch.gmap, gpa,
3916 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003917}
3918
Dominik Dingel3c038e62013-10-07 17:11:48 +02003919static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3920 unsigned long token)
3921{
3922 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003923 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003924
3925 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003926 irq.u.ext.ext_params2 = token;
3927 irq.type = KVM_S390_INT_PFAULT_INIT;
3928 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003929 } else {
3930 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003931 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003932 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3933 }
3934}
3935
Vitaly Kuznetsov2a18b7e2020-06-10 19:55:32 +02003936bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
Dominik Dingel3c038e62013-10-07 17:11:48 +02003937 struct kvm_async_pf *work)
3938{
3939 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3940 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
Vitaly Kuznetsov2a18b7e2020-06-10 19:55:32 +02003941
3942 return true;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003943}
3944
3945void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3946 struct kvm_async_pf *work)
3947{
3948 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3949 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3950}
3951
3952void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3953 struct kvm_async_pf *work)
3954{
3955 /* s390 will always inject the page directly */
3956}
3957
Vitaly Kuznetsov7c0ade62020-05-25 16:41:18 +02003958bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
Dominik Dingel3c038e62013-10-07 17:11:48 +02003959{
3960 /*
3961 * s390 will always inject the page directly,
3962	 * but we still want check_async_completion to clean up
3963 */
3964 return true;
3965}
3966
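/*
 * Decide whether the current host fault may be handled asynchronously
 * via the pfault handshake: the guest must have armed pfault (valid
 * token), the PSW-mask selection must match, external interrupts and
 * the service-signal subclass must be enabled, and the gmap must have
 * pfault enabled. Otherwise the fault is resolved synchronously.
 */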
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02003967static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
Dominik Dingel3c038e62013-10-07 17:11:48 +02003968{
3969 hva_t hva;
3970 struct kvm_arch_async_pf arch;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003971
3972 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02003973 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003974 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3975 vcpu->arch.pfault_compare)
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02003976 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003977 if (psw_extint_disabled(vcpu))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02003978 return false;
David Hildenbrand9a022062014-08-05 17:40:47 +02003979 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02003980 return false;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02003981 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02003982 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003983 if (!vcpu->arch.gmap->pfault_enabled)
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02003984 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003985
Heiko Carstens81480cc2014-01-01 16:36:07 +01003986 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3987 hva += current->thread.gmap_addr & ~PAGE_MASK;
3988 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02003989 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003990
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02003991 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
Dominik Dingel3c038e62013-10-07 17:11:48 +02003992}
3993
Thomas Huth3fb4c402013-09-12 10:33:43 +02003994static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003995{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003996 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003997
Dominik Dingel3c038e62013-10-07 17:11:48 +02003998 /*
3999 * On s390 notifications for arriving pages will be delivered directly
4000	 * to the guest, but the housekeeping for completed pfaults is
4001 * handled outside the worker.
4002 */
4003 kvm_check_async_pf_completion(vcpu);
4004
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01004005 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4006 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004007
4008 if (need_resched())
4009 schedule();
4010
Jens Freimann79395032014-04-17 10:10:30 +02004011 if (!kvm_is_ucontrol(vcpu->kvm)) {
4012 rc = kvm_s390_deliver_pending_interrupts(vcpu);
4013 if (rc)
4014 return rc;
4015 }
Carsten Otte0ff31862008-05-21 13:37:37 +02004016
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02004017 rc = kvm_s390_handle_requests(vcpu);
4018 if (rc)
4019 return rc;
4020
David Hildenbrand27291e22014-01-23 12:26:52 +01004021 if (guestdbg_enabled(vcpu)) {
4022 kvm_s390_backup_guest_per_regs(vcpu);
4023 kvm_s390_patch_guest_per_regs(vcpu);
4024 }
4025
Sean Christopherson4eeef242021-09-10 11:32:19 -07004026 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
Michael Mueller9f30f622019-01-31 09:52:44 +01004027
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004028 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004029 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4030 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4031 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02004032
Thomas Huth3fb4c402013-09-12 10:33:43 +02004033 return 0;
4034}
4035
Thomas Huth492d8642015-02-10 16:11:01 +01004036static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
4037{
David Hildenbrand56317922016-01-12 17:37:58 +01004038 struct kvm_s390_pgm_info pgm_info = {
4039 .code = PGM_ADDRESSING,
4040 };
4041 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01004042 int rc;
4043
4044 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4045 trace_kvm_s390_sie_fault(vcpu);
4046
4047 /*
4048 * We want to inject an addressing exception, which is defined as a
4049 * suppressing or terminating exception. However, since we came here
4050 * by a DAT access exception, the PSW still points to the faulting
4051 * instruction since DAT exceptions are nullifying. So we've got
4052 * to look up the current opcode to get the length of the instruction
4053 * to be able to forward the PSW.
4054 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02004055 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01004056 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01004057 if (rc < 0) {
4058 return rc;
4059 } else if (rc) {
4060 /* Instruction-Fetching Exceptions - we can't detect the ilen.
4061 * Forward by arbitrary ilc, injection will take care of
4062 * nullification if necessary.
4063 */
4064 pgm_info = vcpu->arch.pgm;
4065 ilen = 4;
4066 }
David Hildenbrand56317922016-01-12 17:37:58 +01004067 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4068 kvm_s390_forward_psw(vcpu, ilen);
4069 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01004070}
4071
Thomas Huth3fb4c402013-09-12 10:33:43 +02004072static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4073{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02004074 struct mcck_volatile_info *mcck_info;
4075 struct sie_page *sie_page;
4076
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02004077 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4078 vcpu->arch.sie_block->icptcode);
4079 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4080
David Hildenbrand27291e22014-01-23 12:26:52 +01004081 if (guestdbg_enabled(vcpu))
4082 kvm_s390_restore_guest_per_regs(vcpu);
4083
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01004084 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4085 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004086
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02004087 if (exit_reason == -EINTR) {
4088 VCPU_EVENT(vcpu, 3, "%s", "machine check");
4089 sie_page = container_of(vcpu->arch.sie_block,
4090 struct sie_page, sie_block);
4091 mcck_info = &sie_page->mcck_info;
4092 kvm_s390_reinject_machine_check(vcpu, mcck_info);
4093 return 0;
4094 }
4095
David Hildenbrand71f116b2015-10-19 16:24:28 +02004096 if (vcpu->arch.sie_block->icptcode > 0) {
4097 int rc = kvm_handle_sie_intercept(vcpu);
4098
4099 if (rc != -EOPNOTSUPP)
4100 return rc;
4101 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4102 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4103 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4104 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4105 return -EREMOTE;
4106 } else if (exit_reason != -EFAULT) {
4107 vcpu->stat.exit_null++;
4108 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02004109 } else if (kvm_is_ucontrol(vcpu->kvm)) {
4110 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4111 vcpu->run->s390_ucontrol.trans_exc_code =
4112 current->thread.gmap_addr;
4113 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004114 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004115 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02004116 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004117 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004118 if (kvm_arch_setup_async_pf(vcpu))
4119 return 0;
Christian Borntraeger50a05be2020-11-25 10:06:58 +01004120 vcpu->stat.pfault_sync++;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004121 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004122 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02004123 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004124}
4125
Janosch Frank3adae0b2019-12-13 08:26:06 -05004126#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
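/*
 * For protected guests the ultravisor owns the real register state;
 * only the general purpose registers are shuttled in and out through
 * sie_page->pv_grregs around the SIE entry below.
 */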
Thomas Huth3fb4c402013-09-12 10:33:43 +02004127static int __vcpu_run(struct kvm_vcpu *vcpu)
4128{
4129 int rc, exit_reason;
Janosch Frankc8aac232019-05-08 15:52:00 +02004130 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004131
Thomas Huth800c1062013-09-12 10:33:45 +02004132 /*
4133 * We try to hold kvm->srcu during most of vcpu_run (except when run-
4134 * ning the guest), so that memslots (and other stuff) are protected
4135 */
4136 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4137
Thomas Hutha76ccff2013-09-12 10:33:44 +02004138 do {
4139 rc = vcpu_pre_run(vcpu);
4140 if (rc)
4141 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004142
Thomas Huth800c1062013-09-12 10:33:45 +02004143 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02004144 /*
4145		 * As PF_VCPU will be used in the fault handler, there must be
4146		 * no uaccess between guest_enter and guest_exit.
4147 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02004148 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02004149 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01004150 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02004151 local_irq_enable();
Janosch Frankc8aac232019-05-08 15:52:00 +02004152 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4153 memcpy(sie_page->pv_grregs,
4154 vcpu->run->s.regs.gprs,
4155 sizeof(sie_page->pv_grregs));
4156 }
Sven Schnelle56e62a72020-11-21 11:14:56 +01004157 if (test_cpu_flag(CIF_FPU))
4158 load_fpu_regs();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004159 exit_reason = sie64a(vcpu->arch.sie_block,
4160 vcpu->run->s.regs.gprs);
Janosch Frankc8aac232019-05-08 15:52:00 +02004161 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4162 memcpy(vcpu->run->s.regs.gprs,
4163 sie_page->pv_grregs,
4164 sizeof(sie_page->pv_grregs));
Janosch Frank3adae0b2019-12-13 08:26:06 -05004165 /*
4166 * We're not allowed to inject interrupts on intercepts
4167 * that leave the guest state in an "in-between" state
4168 * where the next SIE entry will do a continuation.
4169 * Fence interrupts in our "internal" PSW.
4170 */
4171 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4172 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4173 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4174 }
Janosch Frankc8aac232019-05-08 15:52:00 +02004175 }
Christian Borntraeger0097d122015-04-30 13:43:30 +02004176 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01004177 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02004178 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02004179 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02004180 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004181
Thomas Hutha76ccff2013-09-12 10:33:44 +02004182 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01004183 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004184
Thomas Huth800c1062013-09-12 10:33:45 +02004185 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01004186 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004187}
4188
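/*
 * sync_regs() propagates state that userspace marked dirty in kvm_run
 * into the SIE control block before guest entry; sync_regs_fmt2()
 * covers the fields that are not accessible for protected guests.
 */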
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004189static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
David Hildenbrandb028ee32014-07-17 10:47:43 +02004190{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004191 struct kvm_run *kvm_run = vcpu->run;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004192 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004193 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004194
4195 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004196 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02004197 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4198 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
David Hildenbrandb028ee32014-07-17 10:47:43 +02004199 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrandb028ee32014-07-17 10:47:43 +02004200 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4201 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4202 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4203 }
4204 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4205 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4206 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4207 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02004208 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4209 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004210 }
Collin Walling23a60f82020-06-22 11:46:36 -04004211 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
4212 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4213 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
Collin Walling3fd84172021-10-26 22:54:51 -04004214 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
Collin Walling23a60f82020-06-22 11:46:36 -04004215 }
Fan Zhang80cd8762016-08-15 04:53:22 +02004216 /*
4217 * If userspace sets the riccb (e.g. after migration) to a valid state,
4218 * we should enable RI here instead of doing the lazy enablement.
4219 */
4220 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004221 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02004222 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01004223 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004224 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01004225 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02004226 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004227 /*
4228 * If userspace sets the gscb (e.g. after migration) to non-zero,
4229 * we should enable GS here instead of doing the lazy enablement.
4230 */
4231 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
4232 test_kvm_facility(vcpu->kvm, 133) &&
4233 gscb->gssm &&
4234 !vcpu->arch.gs_enabled) {
4235 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
4236 vcpu->arch.sie_block->ecb |= ECB_GS;
4237 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4238 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02004239 }
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01004240 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
4241 test_kvm_facility(vcpu->kvm, 82)) {
4242 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4243 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4244 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004245 if (MACHINE_HAS_GS) {
4246 preempt_disable();
4247 __ctl_set_bit(2, 4);
4248 if (current->thread.gs_cb) {
4249 vcpu->arch.host_gscb = current->thread.gs_cb;
4250 save_gs_cb(vcpu->arch.host_gscb);
4251 }
4252 if (vcpu->arch.gs_enabled) {
4253 current->thread.gs_cb = (struct gs_cb *)
4254 &vcpu->run->s.regs.gscb;
4255 restore_gs_cb(current->thread.gs_cb);
4256 }
4257 preempt_enable();
4258 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004259 /* SIE will load etoken directly from SDNX and therefore kvm_run */
Janosch Frank811ea792019-06-14 13:11:21 +02004260}
4261
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004262static void sync_regs(struct kvm_vcpu *vcpu)
Janosch Frank811ea792019-06-14 13:11:21 +02004263{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004264 struct kvm_run *kvm_run = vcpu->run;
4265
Janosch Frank811ea792019-06-14 13:11:21 +02004266 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4267 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4268 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4269 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4270 /* some control register changes require a tlb flush */
4271 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4272 }
4273 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4274 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4275 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4276 }
4277 save_access_regs(vcpu->arch.host_acrs);
4278 restore_access_regs(vcpu->run->s.regs.acrs);
4279 /* save host (userspace) fprs/vrs */
4280 save_fpu_regs();
4281 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4282 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4283 if (MACHINE_HAS_VX)
4284 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
4285 else
4286 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
4287 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
4288 if (test_fp_ctl(current->thread.fpu.fpc))
4289 /* User space provided an invalid FPC, let's clear it */
4290 current->thread.fpu.fpc = 0;
4291
4292 /* Sync fmt2 only data */
4293 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004294 sync_regs_fmt2(vcpu);
Janosch Frank811ea792019-06-14 13:11:21 +02004295 } else {
4296 /*
4297 * In several places we have to modify our internal view to
4298 * not do things that are disallowed by the ultravisor. For
4299 * example we must not inject interrupts after specific exits
4300 * (e.g. 112 prefix page not secure). We do this by turning
4301 * off the machine check, external and I/O interrupt bits
4302 * of our PSW copy. To avoid getting validity intercepts, we
4303 * do only accept the condition code from userspace.
4304 */
4305 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4306 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4307 PSW_MASK_CC;
4308 }
Fan Zhang80cd8762016-08-15 04:53:22 +02004309
David Hildenbrandb028ee32014-07-17 10:47:43 +02004310 kvm_run->kvm_dirty_regs = 0;
4311}
4312
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004313static void store_regs_fmt2(struct kvm_vcpu *vcpu)
David Hildenbrandb028ee32014-07-17 10:47:43 +02004314{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004315 struct kvm_run *kvm_run = vcpu->run;
4316
David Hildenbrandb028ee32014-07-17 10:47:43 +02004317 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4318 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4319 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01004320 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Collin Walling23a60f82020-06-22 11:46:36 -04004321 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004322 if (MACHINE_HAS_GS) {
Heiko Carstens44bada22021-04-15 10:01:27 +02004323 preempt_disable();
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004324 __ctl_set_bit(2, 4);
4325 if (vcpu->arch.gs_enabled)
4326 save_gs_cb(current->thread.gs_cb);
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004327 current->thread.gs_cb = vcpu->arch.host_gscb;
4328 restore_gs_cb(vcpu->arch.host_gscb);
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004329 if (!vcpu->arch.host_gscb)
4330 __ctl_clear_bit(2, 4);
4331 vcpu->arch.host_gscb = NULL;
Heiko Carstens44bada22021-04-15 10:01:27 +02004332 preempt_enable();
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004333 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004334 /* SIE will save etoken directly into SDNX and therefore kvm_run */
David Hildenbrandb028ee32014-07-17 10:47:43 +02004335}
4336
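/*
 * store_regs() is the counterpart of sync_regs(): after guest exit it
 * copies the architected state back into kvm_run for userspace, again
 * skipping the fmt2-only fields for protected guests.
 */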
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004337static void store_regs(struct kvm_vcpu *vcpu)
Janosch Frank811ea792019-06-14 13:11:21 +02004338{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004339 struct kvm_run *kvm_run = vcpu->run;
4340
Janosch Frank811ea792019-06-14 13:11:21 +02004341 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4342 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4343 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
4344 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4345 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
4346 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4347 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4348 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4349 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
4350 save_access_regs(vcpu->run->s.regs.acrs);
4351 restore_access_regs(vcpu->arch.host_acrs);
4352 /* Save guest register state */
4353 save_fpu_regs();
4354 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4355 /* Restore will be done lazily at return */
4356 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
4357 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
4358 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004359 store_regs_fmt2(vcpu);
Janosch Frank811ea792019-06-14 13:11:21 +02004360}
4361
Tianjia Zhang1b94f6f2020-04-16 13:10:57 +08004362int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004363{
Tianjia Zhang1b94f6f2020-04-16 13:10:57 +08004364 struct kvm_run *kvm_run = vcpu->run;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004365 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004366
Paolo Bonzini460df4c2017-02-08 11:50:15 +01004367 if (kvm_run->immediate_exit)
4368 return -EINTR;
4369
Thomas Huth200824f2019-09-04 10:51:59 +02004370 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4371 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4372 return -EINVAL;
4373
Christoffer Dallaccb7572017-12-04 21:35:25 +01004374 vcpu_load(vcpu);
4375
David Hildenbrand27291e22014-01-23 12:26:52 +01004376 if (guestdbg_exit_pending(vcpu)) {
4377 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004378 rc = 0;
4379 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01004380 }
4381
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004382 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004383
Janosch Frankfe28c7862019-05-15 13:24:30 +02004384 /*
4385	 * No need to check the return value of vcpu_start: it can only fail
4386	 * for protvirt, and protvirt implies user-controlled cpu state, so
4387	 * this path is not taken in that case.
4387 */
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004388 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4389 kvm_s390_vcpu_start(vcpu);
4390 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004391 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004392 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004393 rc = -EINVAL;
4394 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004395 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004396
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004397 sync_regs(vcpu);
David Hildenbranddb0758b2016-02-15 09:42:25 +01004398 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004399
Heiko Carstensdab4079d2009-06-12 10:26:32 +02004400 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004401 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02004402
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004403 if (signal_pending(current) && !rc) {
4404 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004405 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004406 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004407
David Hildenbrand27291e22014-01-23 12:26:52 +01004408 if (guestdbg_exit_pending(vcpu) && !rc) {
4409 kvm_s390_prepare_debug_exit(vcpu);
4410 rc = 0;
4411 }
4412
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004413 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02004414 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004415 rc = 0;
4416 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004417
David Hildenbranddb0758b2016-02-15 09:42:25 +01004418 disable_cpu_timer_accounting(vcpu);
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004419 store_regs(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004420
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004421 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004422
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004423 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01004424out:
4425 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02004426 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004427}
4428
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004429/*
4430 * store status at address
4431 * we have two special cases:
4432 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4433 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4434 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01004435int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004436{
Carsten Otte092670c2011-07-24 10:48:22 +02004437 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004438 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02004439 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01004440 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004441 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004442
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004443 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01004444 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4445 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004446 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004447 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004448 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4449 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004450 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004451 gpa = px;
4452 } else
4453 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004454
4455 /* manually convert vector registers if necessary */
4456 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01004457 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004458 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4459 fprs, 128);
4460 } else {
4461 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01004462 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004463 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004464 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004465 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004466 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004467 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004468 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02004469 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004470 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004471 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004472 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004473 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01004474 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004475 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01004476 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01004477 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004478 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004479 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004480 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004481 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004482 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004483 &vcpu->arch.sie_block->gcr, 128);
4484 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004485}
4486
Thomas Huthe8798922013-11-06 15:46:33 +01004487int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4488{
4489 /*
4490 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01004491 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01004492	 * them into the save area
4493 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02004494 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004495 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01004496 save_access_regs(vcpu->run->s.regs.acrs);
4497
4498 return kvm_s390_store_status_unloaded(vcpu, addr);
4499}
4500
David Hildenbrand8ad35752014-03-14 11:00:21 +01004501static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4502{
4503 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004504 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004505}
4506
4507static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
4508{
Marc Zyngier46808a42021-11-16 16:04:02 +00004509 unsigned long i;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004510 struct kvm_vcpu *vcpu;
4511
4512 kvm_for_each_vcpu(i, vcpu, kvm) {
4513 __disable_ibs_on_vcpu(vcpu);
4514 }
4515}
4516
4517static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4518{
David Hildenbrand09a400e2016-04-04 15:57:08 +02004519 if (!sclp.has_ibs)
4520 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004521 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004522 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004523}
4524
Janosch Frankfe28c7862019-05-15 13:24:30 +02004525int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004526{
Janosch Frankfe28c7862019-05-15 13:24:30 +02004527 int i, online_vcpus, r = 0, started_vcpus = 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004528
4529 if (!is_vcpu_stopped(vcpu))
Janosch Frankfe28c7862019-05-15 13:24:30 +02004530 return 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004531
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004532 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004533 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004534 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004535 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4536
Janosch Frankfe28c7862019-05-15 13:24:30 +02004537 /* Let's tell the UV that we want to change into the operating state */
4538 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4539 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
4540 if (r) {
4541 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4542 return r;
4543 }
4544 }
4545
David Hildenbrand8ad35752014-03-14 11:00:21 +01004546 for (i = 0; i < online_vcpus; i++) {
Marc Zyngier113d10b2021-11-16 16:03:59 +00004547 if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
David Hildenbrand8ad35752014-03-14 11:00:21 +01004548 started_vcpus++;
4549 }
4550
4551 if (started_vcpus == 0) {
4552 /* we're the only active VCPU -> speed it up */
4553 __enable_ibs_on_vcpu(vcpu);
4554 } else if (started_vcpus == 1) {
4555 /*
4556 * As we are starting a second VCPU, we have to disable
4557 * the IBS facility on all VCPUs to remove potentially
Bhaskar Chowdhury38860752021-02-13 21:02:27 +05304558 * outstanding ENABLE requests.
David Hildenbrand8ad35752014-03-14 11:00:21 +01004559 */
4560 __disable_ibs_on_all_vcpus(vcpu->kvm);
4561 }
4562
David Hildenbrand9daecfc2018-01-23 18:05:30 +01004563 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004564 /*
Christian Borntraeger72f21822020-01-30 11:18:28 -05004565 * The real PSW might have changed due to a RESTART interpreted by the
4566 * ultravisor. We block all interrupts and let the next sie exit
4567 * refresh our view.
4568 */
4569 if (kvm_s390_pv_cpu_is_protected(vcpu))
4570 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4571 /*
David Hildenbrand8ad35752014-03-14 11:00:21 +01004572 * Another VCPU might have used IBS while we were offline.
4573 * Let's play safe and flush the VCPU at startup.
4574 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02004575 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004576 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
Janosch Frankfe28c7862019-05-15 13:24:30 +02004577 return 0;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004578}
4579
Janosch Frankfe28c7862019-05-15 13:24:30 +02004580int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004581{
Janosch Frankfe28c7862019-05-15 13:24:30 +02004582 int i, online_vcpus, r = 0, started_vcpus = 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004583 struct kvm_vcpu *started_vcpu = NULL;
4584
4585 if (is_vcpu_stopped(vcpu))
Janosch Frankfe28c7862019-05-15 13:24:30 +02004586 return 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004587
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004588 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004589 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004590 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004591 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4592
Janosch Frankfe28c7862019-05-15 13:24:30 +02004593 /* Let's tell the UV that we want to change into the stopped state */
4594 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4595 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
4596 if (r) {
4597 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4598 return r;
4599 }
4600 }
4601
Eric Farman812de0462021-12-13 22:05:50 +01004602 /*
4603 * Set the VCPU to STOPPED and THEN clear the interrupt flag,
4604 * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
4605 * have been fully processed. This will ensure that the VCPU
4606 * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
4607 */
4608 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand6cddd432014-10-15 16:48:53 +02004609 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004610
David Hildenbrand8ad35752014-03-14 11:00:21 +01004611 __disable_ibs_on_vcpu(vcpu);
4612
4613 for (i = 0; i < online_vcpus; i++) {
Marc Zyngier113d10b2021-11-16 16:03:59 +00004614 struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
4615
4616 if (!is_vcpu_stopped(tmp)) {
David Hildenbrand8ad35752014-03-14 11:00:21 +01004617 started_vcpus++;
Marc Zyngier113d10b2021-11-16 16:03:59 +00004618 started_vcpu = tmp;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004619 }
4620 }
4621
4622 if (started_vcpus == 1) {
4623 /*
4624 * As we only have one VCPU left, we want to enable the
4625 * IBS facility for that VCPU to speed it up.
4626 */
4627 __enable_ibs_on_vcpu(started_vcpu);
4628 }
4629
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004630 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
Janosch Frankfe28c7862019-05-15 13:24:30 +02004631 return 0;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004632}
4633
Cornelia Huckd6712df2012-12-20 15:32:11 +01004634static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4635 struct kvm_enable_cap *cap)
4636{
4637 int r;
4638
4639 if (cap->flags)
4640 return -EINVAL;
4641
4642 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004643 case KVM_CAP_S390_CSS_SUPPORT:
4644 if (!vcpu->kvm->arch.css_support) {
4645 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02004646 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004647 trace_kvm_s390_enable_css(vcpu->kvm);
4648 }
4649 r = 0;
4650 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01004651 default:
4652 r = -EINVAL;
4653 break;
4654 }
4655 return r;
4656}
4657
Janosch Frank19e12272019-04-02 09:21:06 +02004658static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
4659 struct kvm_s390_mem_op *mop)
4660{
4661 void __user *uaddr = (void __user *)mop->buf;
4662 int r = 0;
4663
4664 if (mop->flags || !mop->size)
4665 return -EINVAL;
4666 if (mop->size + mop->sida_offset < mop->size)
4667 return -EINVAL;
4668 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
4669 return -E2BIG;
Janis Schoetterl-Glausch2c212e12022-01-28 15:06:43 +01004670 if (!kvm_s390_pv_cpu_is_protected(vcpu))
4671 return -EINVAL;
Janosch Frank19e12272019-04-02 09:21:06 +02004672
4673 switch (mop->op) {
4674 case KVM_S390_MEMOP_SIDA_READ:
4675 if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
4676 mop->sida_offset), mop->size))
4677 r = -EFAULT;
4678
4679 break;
4680 case KVM_S390_MEMOP_SIDA_WRITE:
4681 if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
4682 mop->sida_offset), uaddr, mop->size))
4683 r = -EFAULT;
4684 break;
4685 }
4686 return r;
4687}
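
/*
 * MEM_OP on logical guest addresses: with the CHECK_ONLY flag only
 * address translation and access checking are performed; otherwise
 * the data is staged through a vmalloc'ed bounce buffer between
 * userspace and guest memory. On access errors the pending program
 * interruption is injected if F_INJECT_EXCEPTION was requested.
 */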
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r = 0;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	}

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

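/*
 * Dispatcher for KVM_S390_MEM_OP: routes logical accesses to
 * kvm_s390_guest_mem_op() and SIDA accesses to kvm_s390_guest_sida_op(),
 * all under the kvm->srcu read lock so the memslots cannot change
 * underneath us.
 */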
static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu,
				      struct kvm_s390_mem_op *mop)
{
	int r, srcu_idx;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		r = kvm_s390_guest_mem_op(vcpu, mop);
		break;
	case KVM_S390_MEMOP_SIDA_READ:
	case KVM_S390_MEMOP_SIDA_WRITE:
		/* we are locked against sida going away by the vcpu->mutex */
		r = kvm_s390_guest_sida_op(vcpu, mop);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	return r;
}

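/*
 * Async vcpu ioctls are dispatched here by the generic KVM code without
 * taking vcpu->mutex and without vcpu_load(), so KVM_S390_IRQ and
 * KVM_S390_INTERRUPT can inject interrupts even while the vcpu is
 * running.
 */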
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

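/*
 * All remaining vcpu ioctls run with the vcpu loaded (vcpu_load() /
 * vcpu_put()) and with vcpu->mutex held by the generic KVM code.
 */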
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;
	u16 rc, rrc;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
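	/*
	 * The three reset ioctls differ in how much vcpu state they clear.
	 * For protected guests the Ultravisor holds part of that state, so
	 * the matching UVC_CMD_CPU_RESET* call is issued as well.
	 */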
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_INITIAL,
					  &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
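	/*
	 * For user-controlled (ucontrol) VMs, userspace manages the guest
	 * address space itself: UCAS_MAP/UCAS_UNMAP wire user memory
	 * segments into or out of this vcpu's gmap.
	 */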
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_memsida_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

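/*
 * mmap fault handler for the vcpu fd: only ucontrol VMs expose a page
 * here, namely the sie_block at offset KVM_S390_SIE_PAGE_OFFSET.
 */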
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

/* Section: memory related */
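/*
 * Memslots must start and end on 1 MB segment boundaries and fit below
 * the configured memory limit; protected VMs may not change their
 * memslots at all. Deletions and flag-only changes need no preparation.
 */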
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	gpa_t size;

	/* When we are protected, we should not change the memory slots */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;

	if (change == KVM_MR_DELETE || change == KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * A few sanity checks. We can have memory slots which have to be
	 * located/ended at a segment boundary (1MB). The memory in userland
	 * is ok to be fragmented into various different vmas. It is okay
	 * to mmap() and munmap() stuff in this slot after doing this call
	 * at any time.
	 */

	if (new->userspace_addr & 0xffffful)
		return -EINVAL;

	size = new->npages * PAGE_SIZE;
	if (size & 0xffffful)
		return -EINVAL;

	if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}

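/*
 * Propagate memslot changes into the gmap: unmap deleted or moved
 * ranges, then (re)map created or moved ones. Failures are only
 * reported via pr_warn() because the generic memslot update has
 * already been committed at this point.
 */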
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		fallthrough;
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
				      new->base_gfn * PAGE_SIZE,
				      new->npages * PAGE_SIZE);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

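/*
 * Build a mask of the facility bits in facility-list doubleword i that
 * may be handed to guests: the two-bit field for i extracted from
 * sclp.hmfai blanks out 16 facility bits per increment.
 */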
static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

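/*
 * Module init: refuse to load when SIE is not available
 * (!sclp.has_sief2), reject the unsupported nested+hpage combination,
 * and seed the guest facility list from the host's, filtered through
 * nonhyp_mask().
 */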
static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");