// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, code changes are required, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

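/*
 * Number of double words KVM uses for each facility mask/list; the
 * compile-time checks below ensure it never exceeds what the architecture
 * or the host's stfle buffer can hold.
 */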
static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

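/*
 * Adjust the guest epoch (and, with the multi-epoch facility, the epoch
 * index) in a SIE control block after the host TOD clock jumped by delta.
 */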
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void *opaque)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

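/*
 * Probe one PERFORM LOCKED OPERATION function code: setting bit 0x100
 * selects the "test bit" form, which only reports via the condition code
 * (cc 0 == function installed) and ignores the parameter registers.
 */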
static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}

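/*
 * Execute the query function (function code 0 in GR0) of the instruction
 * given by its 32-bit opcode and store the resulting subfunction mask at
 * the location GR1 points to.
 */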
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

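/*
 * Probe the host for installed subfunctions (PLO, PTFF, the CPACF crypto
 * functions, SORTL and DFLTCC) and for the SIE features that can safely
 * be made available to nested (vSIE) guests.
 */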
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto out;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	default:
		r = 0;
	}
	return r;
}

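/*
 * Harvest dirty bits for a memslot: walk it in segment-sized
 * (_PAGE_ENTRIES) chunks and transfer the per-page dirty state tracked
 * by the gmap into the memslot's dirty bitmap.
 */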
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

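/*
 * Request every vCPU to intercept the operation exception, so that
 * instruction 0x0000 can be forwarded to user space (user_instr0).
 */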
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

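/*
 * Reconfigure the crypto controls (wrapping key masks, APIE) in every
 * vCPU's SIE block. All vCPUs are blocked while doing so, and a VSIE
 * restart request makes sure shadow CRYCBs of nested guests get rebuilt.
 */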
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

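/* Make a synchronous request on every vCPU of the VM. */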
static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	unsigned long cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

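/*
 * Set the guest TOD clock including the epoch index (multi-epoch
 * facility, STFLE bit 139); a nonzero epoch index is rejected if that
 * facility is not available to the guest.
 */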
Collin L. Walling8fa16962016-07-26 15:29:44 -04001122static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1123{
1124 struct kvm_s390_vm_tod_clock gtod;
1125
1126 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
1127 return -EFAULT;
1128
David Hildenbrand0e7def52018-02-07 12:46:43 +01001129 if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001130 return -EINVAL;
David Hildenbrand0e7def52018-02-07 12:46:43 +01001131 kvm_s390_set_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001132
1133 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
1134 gtod.epoch_idx, gtod.tod);
1135
1136 return 0;
1137}
1138
Jason J. Herne72f25022014-11-25 09:46:02 -05001139static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1140{
1141 u8 gtod_high;
1142
1143 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
1144 sizeof(gtod_high)))
1145 return -EFAULT;
1146
1147 if (gtod_high != 0)
1148 return -EINVAL;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001149 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001150
1151 return 0;
1152}
1153
1154static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1155{
David Hildenbrand0e7def52018-02-07 12:46:43 +01001156 struct kvm_s390_vm_tod_clock gtod = { 0 };
Jason J. Herne72f25022014-11-25 09:46:02 -05001157
David Hildenbrand0e7def52018-02-07 12:46:43 +01001158 if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
1159 sizeof(gtod.tod)))
Jason J. Herne72f25022014-11-25 09:46:02 -05001160 return -EFAULT;
1161
David Hildenbrand0e7def52018-02-07 12:46:43 +01001162 kvm_s390_set_tod_clock(kvm, &gtod);
1163 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001164 return 0;
1165}
1166
1167static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1168{
1169 int ret;
1170
1171 if (attr->flags)
1172 return -EINVAL;
1173
1174 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001175 case KVM_S390_VM_TOD_EXT:
1176 ret = kvm_s390_set_tod_ext(kvm, attr);
1177 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001178 case KVM_S390_VM_TOD_HIGH:
1179 ret = kvm_s390_set_tod_high(kvm, attr);
1180 break;
1181 case KVM_S390_VM_TOD_LOW:
1182 ret = kvm_s390_set_tod_low(kvm, attr);
1183 break;
1184 default:
1185 ret = -ENXIO;
1186 break;
1187 }
1188 return ret;
1189}
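
/*
 * Usage sketch (not part of this file): setting the full guest TOD clock,
 * including the epoch index, through the attribute interface above. Names
 * are assumed to come from <linux/kvm.h>; error handling is elided.
 *
 *	struct kvm_s390_vm_tod_clock gtod = {
 *		.epoch_idx = 0,
 *		.tod = host_tod + guest_offset,	// hypothetical values
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_EXT,
 *		.addr  = (__u64)&gtod,
 *	};
 *	ret = ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */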

static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	union tod_clock clk;

	preempt_disable();

	store_tod_clock_ext(&clk);

	gtod->tod = clk.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = clk.ei + kvm->arch.epdx;
		if (gtod->tod < clk.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}
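
/*
 * Worked example for the carry above: the guest TOD is the 64-bit sum
 * clk.tod + kvm->arch.epoch. If clk.tod is 0xffff000000000000 and the
 * epoch is 0x0002000000000000, the sum wraps to 0x0001000000000000,
 * which is smaller than clk.tod. That wraparound is exactly the case
 * where one more epoch index (the multiple-epoch extension of
 * facility 139) must be added.
 */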

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
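
/*
 * Worked example for the IBC clamping above, assuming sclp.ibc reads
 * 0x01200230: lowest_ibc = 0x120 and unblocked_ibc = 0x230. A userspace
 * request of 0x100 is raised to 0x120, a request of 0x300 is lowered to
 * 0x230, and a request of 0x200 is taken unchanged. If the machine
 * reports no IBC (lowest_ibc == 0) or userspace passes 0, the current
 * value is left alone.
 */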

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
	       sizeof(stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}
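
/*
 * Usage sketch (not part of this file): probing the host model and then
 * pinning the guest processor to it before any vcpu is created, through
 * the attribute handlers above. Names are assumed to come from
 * <linux/kvm.h>; error handling is elided.
 *
 *	struct kvm_s390_vm_cpu_machine mach;
 *	struct kvm_s390_vm_cpu_processor proc;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_MODEL,
 *		.attr  = KVM_S390_VM_CPU_MACHINE,
 *		.addr  = (__u64)&mach,
 *	};
 *	ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *
 *	proc.cpuid = mach.cpuid;
 *	proc.ibc = mach.ibc & 0xfff;	// run at the unblocked IBC
 *	memcpy(proc.fac_list, mach.fac_list, sizeof(proc.fac_list));
 *	attr.attr = KVM_S390_VM_CPU_PROCESSOR;
 *	attr.addr = (__u64)&proc;
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */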

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}
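
/*
 * Usage sketch (not part of this file): reading the first 128 guest
 * storage keys with the ioctl implemented above. Names are assumed to
 * come from <linux/kvm.h>; error handling is elided.
 *
 *	uint8_t keys[128];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = 128,
 *		.skeydata_addr = (__u64)keys,
 *	};
 *	ret = ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *	// ret == KVM_S390_GET_SKEYS_NONE: guest does not use storage keys
 */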

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);
out:
	kvfree(keys);
	return r;
}

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
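
/*
 * Why 2 * sizeof(void *): every new block in the migration stream is
 * prefixed with a starting gfn and a length, i.e. two 8-byte words on
 * s390x. Re-starting a block therefore costs 16 bytes of header, so as
 * long as a run of clean values is shorter than 16 bytes it is cheaper
 * to transmit the clean values inline than to close the block and open
 * a new one.
 */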

static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}

static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
						     gfn_t gfn)
{
	return ____gfn_to_memslot(slots, gfn, true);
}

static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
	int slotidx = ms - slots->memslots;
	unsigned long ofs = cur_gfn - ms->base_gfn;

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		slotidx--;
		/* If we are above the highest slot, wrap around */
		if (slotidx < 0)
			slotidx = slots->used_slots - 1;

		ms = slots->memslots + slotidx;
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while ((slotidx > 0) && (ofs >= ms->npages)) {
		slotidx--;
		ms = slots->memslots + slotidx;
		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
	}
	return ms->base_gfn + ofs;
}

static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	if (unlikely(!slots->used_slots))
		return 0;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}
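
/*
 * Usage sketch (not part of this file): the loop a VMM might run on top
 * of kvm_s390_get_cmma_bits() after enabling migration mode with
 * KVM_S390_VM_MIGRATION_START. Names are assumed to come from
 * <linux/kvm.h>; error handling is elided.
 *
 *	uint8_t values[4096];
 *	struct kvm_s390_cmma_log log = { .start_gfn = 0 };
 *
 *	do {
 *		log.count = sizeof(values);
 *		log.flags = 0;		// 0 = consume; KVM_S390_CMMA_PEEK = peek
 *		log.values = (__u64)values;
 *		ret = ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 *		// transmit log.count values starting at log.start_gfn
 *		log.start_gfn += log.count;
 *	} while (!ret && log.remaining);
 */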

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (!kvm->mm->context.uses_cmm) {
		mmap_write_lock(kvm->mm);
		kvm->mm->context.uses_cmm = 1;
		mmap_write_unlock(kvm->mm);
	}
out:
	vfree(bits);
	return r;
}

static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
{
	struct kvm_vcpu *vcpu;
	u16 rc, rrc;
	int ret = 0;
	unsigned long i;

	/*
	 * We ignore failures and try to destroy as many CPUs as possible.
	 * At the same time we must not free the assigned resources when
	 * this fails, as the ultravisor still has access to that memory.
	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
	 * behind.
	 * We want to return the first failure rc and rrc, though.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
			*rcp = rc;
			*rrcp = rrc;
			ret = -EIO;
		}
		mutex_unlock(&vcpu->mutex);
	}
	return ret;
}

static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	unsigned long i;
	int r = 0;
	u16 dummy;

	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
		mutex_unlock(&vcpu->mutex);
		if (r)
			break;
	}
	if (r)
		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
	return r;
}

static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
{
	int r = 0;
	u16 dummy;
	void __user *argp = (void __user *)cmd->data;

	switch (cmd->cmd) {
	case KVM_PV_ENABLE: {
		r = -EINVAL;
		if (kvm_s390_pv_is_protected(kvm))
			break;

		/*
		 * FMT 4 SIE needs esca. As we never switch back to bsca from
		 * esca, we need no cleanup in the error cases below
		 */
		r = sca_switch_to_extended(kvm);
		if (r)
			break;

		mmap_write_lock(current->mm);
		r = gmap_mark_unmergeable();
		mmap_write_unlock(current->mm);
		if (r)
			break;

		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			break;

		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);

		/* we need to block service interrupts from now on */
		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_DISABLE: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
		/*
		 * If a CPU could not be destroyed, destroy VM will also fail.
		 * There is no point in trying to destroy it. Instead return
		 * the rc and rrc from the first CPU that failed destroying.
		 */
		if (r)
			break;
		r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);

		/* no need to block service interrupts any more */
		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_SET_SEC_PARMS: {
		struct kvm_s390_pv_sec_parm parms = {};
		void *hdr;

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = -EFAULT;
		if (copy_from_user(&parms, argp, sizeof(parms)))
			break;

		/* Currently restricted to 8KB */
		r = -EINVAL;
		if (parms.length > PAGE_SIZE * 2)
			break;

		r = -ENOMEM;
		hdr = vmalloc(parms.length);
		if (!hdr)
			break;

		r = -EFAULT;
		if (!copy_from_user(hdr, (void __user *)parms.origin,
				    parms.length))
			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
						      &cmd->rc, &cmd->rrc);

		vfree(hdr);
		break;
	}
	case KVM_PV_UNPACK: {
		struct kvm_s390_pv_unp unp = {};

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
			break;

		r = -EFAULT;
		if (copy_from_user(&unp, argp, sizeof(unp)))
			break;

		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
				       &cmd->rc, &cmd->rrc);
		break;
	}
	case KVM_PV_VERIFY: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
			     cmd->rrc);
		break;
	}
	case KVM_PV_PREP_RESET: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		break;
	}
	case KVM_PV_UNSHARE_ALL: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
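
/*
 * Usage sketch (not part of this file): the command sequence a VMM would
 * issue to convert a VM to protected mode and prepare its image, mirroring
 * the handler above. Names are assumed to come from <linux/kvm.h>; error
 * handling is elided, and on failure cmd.rc/cmd.rrc hold the ultravisor
 * return codes.
 *
 *	struct kvm_pv_cmd cmd = { .cmd = KVM_PV_ENABLE };
 *
 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);  // donate memory, create CPUs
 *	cmd.cmd  = KVM_PV_SET_SEC_PARMS;
 *	cmd.data = (__u64)&parms;	// struct kvm_s390_pv_sec_parm
 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
 *	cmd.cmd  = KVM_PV_UNPACK;	// once per encrypted image chunk
 *	cmd.data = (__u64)&unp;		// struct kvm_s390_pv_unp
 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
 *	cmd.cmd  = KVM_PV_VERIFY;
 *	cmd.data = 0;
 *	ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);
 */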

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	case KVM_S390_PV_COMMAND: {
		struct kvm_pv_cmd args;

		/* protvirt means user cpu state */
		kvm_s390_set_user_cpu_state_ctrl(kvm);
		r = 0;
		if (!is_prot_virt_host()) {
			r = -EINVAL;
			break;
		}
		if (copy_from_user(&args, argp, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		if (args.flags) {
			r = -EINVAL;
			break;
		}
		mutex_lock(&kvm->lock);
		r = kvm_s390_handle_pv(kvm, &args);
		mutex_unlock(&kvm->lock);
		if (copy_to_user(argp, &args, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}

/*
 * The format of the crypto control block (CRYCB) is specified in the 3 low
 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility are installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed.
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
		return;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
2531
Tony Krowiak86956e72021-08-23 17:20:47 -04002532/*
2533 * kvm_arch_crypto_set_masks
2534 *
2535 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
2536 * to be set.
2537 * @apm: the mask identifying the accessible AP adapters
2538 * @aqm: the mask identifying the accessible AP domains
2539 * @adm: the mask identifying the accessible AP control domains
2540 *
2541 * Set the masks that identify the adapters, domains and control domains to
2542 * which the KVM guest is granted access.
2543 *
2544 * Note: The kvm->lock mutex must be locked by the caller before invoking this
2545 * function.
2546 */
Pierre Morel0e237e42018-10-05 10:31:09 +02002547void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
2548 unsigned long *aqm, unsigned long *adm)
2549{
2550 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2551
Pierre Morel0e237e42018-10-05 10:31:09 +02002552 kvm_s390_vcpu_block_all(kvm);
2553
2554 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2555 case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
2556 memcpy(crycb->apcb1.apm, apm, 32);
2557 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
2558 apm[0], apm[1], apm[2], apm[3]);
2559 memcpy(crycb->apcb1.aqm, aqm, 32);
2560 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
2561 aqm[0], aqm[1], aqm[2], aqm[3]);
2562 memcpy(crycb->apcb1.adm, adm, 32);
2563 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
2564 adm[0], adm[1], adm[2], adm[3]);
2565 break;
2566 case CRYCB_FORMAT1:
2567 case CRYCB_FORMAT0: /* fall through - both use APCB0 */
2568 memcpy(crycb->apcb0.apm, apm, 8);
2569 memcpy(crycb->apcb0.aqm, aqm, 2);
2570 memcpy(crycb->apcb0.adm, adm, 2);
2571 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
2572 apm[0], *((unsigned short *)aqm),
2573 *((unsigned short *)adm));
2574 break;
2575 default:	/* Cannot happen */
2576 break;
2577 }
2578
2579 /* recreate the shadow crycb for each vcpu */
2580 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2581 kvm_s390_vcpu_unblock_all(kvm);
Pierre Morel0e237e42018-10-05 10:31:09 +02002582}
2583EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
2584
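/*
 * Caller sketch (illustrative; names and mask values are invented): the
 * typical user is the vfio_ap device driver, and kvm->lock must be held
 * around the call as the comment above requires.
 */
static void example_grant_ap_resources(struct kvm *kvm)
{
	DECLARE_BITMAP(apm, 256) = { 0 };
	DECLARE_BITMAP(aqm, 256) = { 0 };
	DECLARE_BITMAP(adm, 256) = { 0 };

	set_bit_inv(0, apm);	/* AP adapter 0 */
	set_bit_inv(1, aqm);	/* usage domain 1 */
	set_bit_inv(1, adm);	/* control domain 1 */

	mutex_lock(&kvm->lock);
	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);
	mutex_unlock(&kvm->lock);
}
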
Tony Krowiak86956e72021-08-23 17:20:47 -04002585/*
2586 * kvm_arch_crypto_clear_masks
2587 *
2588 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
2589 * to be cleared.
2590 *
2591 * Clear the masks that identify the adapters, domains and control domains to
2592 * which the KVM guest is granted access.
2593 *
2594 * Note: The kvm->lock mutex must be locked by the caller before invoking this
2595 * function.
2596 */
Tony Krowiak421045982018-09-25 19:16:25 -04002597void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2598{
Tony Krowiak421045982018-09-25 19:16:25 -04002599 kvm_s390_vcpu_block_all(kvm);
2600
2601 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2602 sizeof(kvm->arch.crypto.crycb->apcb0));
2603 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2604 sizeof(kvm->arch.crypto.crycb->apcb1));
2605
Pierre Morel0e237e42018-10-05 10:31:09 +02002606 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
Pierre Morel6cc571b2018-09-25 19:16:30 -04002607 /* recreate the shadow crycb for each vcpu */
2608 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
Tony Krowiak421045982018-09-25 19:16:25 -04002609 kvm_s390_vcpu_unblock_all(kvm);
Tony Krowiak421045982018-09-25 19:16:25 -04002610}
2611EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2612
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002613static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002614{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002615 struct cpuid cpuid;
2616
2617 get_cpu_id(&cpuid);
2618 cpuid.version = 0xff;
2619 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002620}
2621
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002622static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002623{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002624 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002625 kvm_s390_set_crycb_format(kvm);
Tony Krowiak1e753732021-08-23 17:20:46 -04002626 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002627
Tony Krowiake585b242018-09-25 19:16:18 -04002628 if (!test_kvm_facility(kvm, 76))
2629 return;
2630
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002631 /* Enable AES/DEA protected key functions by default */
2632 kvm->arch.crypto.aes_kw = 1;
2633 kvm->arch.crypto.dea_kw = 1;
2634 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2635 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2636 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2637 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002638}
2639
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002640static void sca_dispose(struct kvm *kvm)
2641{
2642 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002643 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002644 else
2645 free_page((unsigned long)(kvm->arch.sca));
2646 kvm->arch.sca = NULL;
2647}
2648
Carsten Ottee08b9632012-01-04 10:25:20 +01002649int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002650{
Christian Borntraegerc4196212020-11-06 08:34:23 +01002651 gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002652 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002653 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002654 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002655
Carsten Ottee08b9632012-01-04 10:25:20 +01002656 rc = -EINVAL;
2657#ifdef CONFIG_KVM_S390_UCONTROL
2658 if (type & ~KVM_VM_S390_UCONTROL)
2659 goto out_err;
2660 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2661 goto out_err;
2662#else
2663 if (type)
2664 goto out_err;
2665#endif
2666
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002667 rc = s390_enable_sie();
2668 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002669 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002670
Carsten Otteb2904112011-10-18 12:27:13 +02002671 rc = -ENOMEM;
2672
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002673 if (!sclp.has_64bscao)
2674 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002675 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002676 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002677 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002678 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002679 goto out_err;
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002680 mutex_lock(&kvm_lock);
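	/* note: staggering below presumably spreads SCA cache-line usage across VMs */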
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002681 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002682 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002683 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002684 kvm->arch.sca = (struct bsca_block *)
2685 ((char *) kvm->arch.sca + sca_offset);
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002686 mutex_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002687
2688 sprintf(debug_name, "kvm-%u", current->pid);
2689
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002690 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002691 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002692 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002693
Michael Mueller19114be2017-05-30 14:26:02 +02002694 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002695 kvm->arch.sie_page2 =
Christian Borntraegerc4196212020-11-06 08:34:23 +01002696 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002697 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002698 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002699
Michael Mueller25c84db2019-01-31 09:52:41 +01002700 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002701 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002702
2703 for (i = 0; i < kvm_s390_fac_size(); i++) {
Sven Schnelle17e89e12021-05-05 22:01:10 +02002704 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002705 (kvm_s390_fac_base[i] |
2706 kvm_s390_fac_ext[i]);
Sven Schnelle17e89e12021-05-05 22:01:10 +02002707 kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002708 kvm_s390_fac_base[i];
2709 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002710 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002711
David Hildenbrand19352222017-08-29 16:31:08 +02002712 /* we are always in czam mode - even on pre-z14 machines */
2713 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2714 set_kvm_facility(kvm->arch.model.fac_list, 138);
2715 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002716 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2717 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002718 if (MACHINE_HAS_TLB_GUEST) {
2719 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2720 set_kvm_facility(kvm->arch.model.fac_list, 147);
2721 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002722
Pierre Morel05f31e32019-05-21 17:34:37 +02002723 if (css_general_characteristics.aiv && test_facility(65))
2724 set_kvm_facility(kvm->arch.model.fac_mask, 65);
2725
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002726 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002727 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002728
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002729 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002730
Fei Li51978392017-02-17 17:06:26 +08002731 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002732 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002733 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2734 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002735 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002736 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002737
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002738 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002739 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002740
Carsten Ottee08b9632012-01-04 10:25:20 +01002741 if (type & KVM_VM_S390_UCONTROL) {
2742 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002743 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002744 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002745 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002746 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002747 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002748 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002749 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002750 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002751 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002752 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002753 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002754 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002755 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002756
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002757 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002758 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002759 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002760 kvm_s390_vsie_init(kvm);
Michael Muellercc674ef2020-02-27 10:10:31 +01002761 if (use_gisa)
2762 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002763 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002764
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002765 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002766out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002767 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002768 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002769 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002770 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002771 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002772}
2773
Christian Borntraegerd329c032008-11-26 14:50:27 +01002774void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2775{
Janosch Frank29b40f12019-09-30 04:19:18 -04002776 u16 rc, rrc;
2777
Christian Borntraegerd329c032008-11-26 14:50:27 +01002778 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002779 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002780 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002781 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002782 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002783 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002784
2785 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002786 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002787
Dominik Dingele6db1d62015-05-07 15:41:57 +02002788 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002789 kvm_s390_vcpu_unsetup_cmma(vcpu);
Janosch Frank29b40f12019-09-30 04:19:18 -04002790 /* We cannot hold the vcpu mutex here; we are already dying */
2791 if (kvm_s390_pv_cpu_get_handle(vcpu))
2792 kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002793 free_page((unsigned long)(vcpu->arch.sie_block));
Christian Borntraegerd329c032008-11-26 14:50:27 +01002794}
2795
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002796void kvm_arch_destroy_vm(struct kvm *kvm)
2797{
Janosch Frank29b40f12019-09-30 04:19:18 -04002798 u16 rc, rrc;
2799
Marc Zyngier27592ae2021-11-16 16:03:57 +00002800 kvm_destroy_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002801 sca_dispose(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002802 kvm_s390_gisa_destroy(kvm);
Janosch Frank29b40f12019-09-30 04:19:18 -04002803 /*
2804 * We are already at the end of life and kvm->lock is not taken.
2805 * This is ok as the file descriptor is closed by now and nobody
2806 * can mess with the pv state. To avoid lockdep_assert_held from
2807 * complaining, we do not use kvm_s390_pv_is_protected.
2808 */
2809 if (kvm_s390_pv_get_handle(kvm))
2810 kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
2811 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002812 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002813 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002814 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002815 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002816 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002817 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002818 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002819}
2820
2821/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002822static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2823{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002824 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002825 if (!vcpu->arch.gmap)
2826 return -ENOMEM;
2827 vcpu->arch.gmap->private = vcpu->kvm;
2828
2829 return 0;
2830}
2831
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002832static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2833{
David Hildenbranda6940672016-08-08 22:39:32 +02002834 if (!kvm_s390_use_sca_entries())
2835 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002836 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002837 if (vcpu->kvm->arch.use_esca) {
2838 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002839
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002840 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002841 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002842 } else {
2843 struct bsca_block *sca = vcpu->kvm->arch.sca;
2844
2845 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002846 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002847 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002848 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002849}
2850
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002851static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002852{
David Hildenbranda6940672016-08-08 22:39:32 +02002853 if (!kvm_s390_use_sca_entries()) {
2854 struct bsca_block *sca = vcpu->kvm->arch.sca;
2855
2856 /* we still need the basic sca for the ipte control */
2857 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2858 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002859 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002860 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002861 read_lock(&vcpu->kvm->arch.sca_lock);
2862 if (vcpu->kvm->arch.use_esca) {
2863 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002864
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002865 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002866 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2867 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002868 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002869 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002870 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002871 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002872
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002873 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002874 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2875 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002876 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002877 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002878 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002879}
2880
2881/* Basic SCA to Extended SCA data copy routines */
2882static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2883{
2884 d->sda = s->sda;
2885 d->sigp_ctrl.c = s->sigp_ctrl.c;
2886 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2887}
2888
2889static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2890{
2891 int i;
2892
2893 d->ipte_control = s->ipte_control;
2894 d->mcn[0] = s->mcn;
2895 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2896 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2897}
2898
2899static int sca_switch_to_extended(struct kvm *kvm)
2900{
2901 struct bsca_block *old_sca = kvm->arch.sca;
2902 struct esca_block *new_sca;
2903 struct kvm_vcpu *vcpu;
Marc Zyngier46808a42021-11-16 16:04:02 +00002904 unsigned long vcpu_idx;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002905 u32 scaol, scaoh;
2906
Janosch Frank29b40f12019-09-30 04:19:18 -04002907 if (kvm->arch.use_esca)
2908 return 0;
2909
Christian Borntraegerc4196212020-11-06 08:34:23 +01002910 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002911 if (!new_sca)
2912 return -ENOMEM;
2913
2914 scaoh = (u32)((u64)(new_sca) >> 32);
2915 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2916
2917 kvm_s390_vcpu_block_all(kvm);
2918 write_lock(&kvm->arch.sca_lock);
2919
2920 sca_copy_b_to_e(new_sca, old_sca);
2921
2922 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2923 vcpu->arch.sie_block->scaoh = scaoh;
2924 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002925 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002926 }
2927 kvm->arch.sca = new_sca;
2928 kvm->arch.use_esca = 1;
2929
2930 write_unlock(&kvm->arch.sca_lock);
2931 kvm_s390_vcpu_unblock_all(kvm);
2932
2933 free_page((unsigned long)old_sca);
2934
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002935 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2936 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002937 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002938}
2939
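/*
 * The switch above follows a simple protocol: block all vcpus and kick
 * them out of SIE, copy the basic SCA into the extended one under the
 * sca_lock write lock, repoint every SIE block to the new origin, and
 * only then free the old page. sca_add_vcpu() and sca_del_vcpu() take
 * sca_lock for reading, so they never observe a half-switched SCA.
 */
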
2940static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2941{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002942 int rc;
2943
David Hildenbranda6940672016-08-08 22:39:32 +02002944 if (!kvm_s390_use_sca_entries()) {
2945 if (id < KVM_MAX_VCPUS)
2946 return true;
2947 return false;
2948 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002949 if (id < KVM_S390_BSCA_CPU_SLOTS)
2950 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002951 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002952 return false;
2953
2954 mutex_lock(&kvm->lock);
2955 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2956 mutex_unlock(&kvm->lock);
2957
2958 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002959}
2960
David Hildenbranddb0758b2016-02-15 09:42:25 +01002961/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2962static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2963{
2964 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002965 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002966 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002967 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002968}
2969
2970/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2971static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2972{
2973 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002974 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002975 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2976 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002977 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002978}
2979
2980/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2981static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2982{
2983 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2984 vcpu->arch.cputm_enabled = true;
2985 __start_cpu_timer_accounting(vcpu);
2986}
2987
2988/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2989static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2990{
2991 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2992 __stop_cpu_timer_accounting(vcpu);
2993 vcpu->arch.cputm_enabled = false;
2994}
2995
2996static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2997{
2998 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2999 __enable_cpu_timer_accounting(vcpu);
3000 preempt_enable();
3001}
3002
3003static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3004{
3005 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3006 __disable_cpu_timer_accounting(vcpu);
3007 preempt_enable();
3008}
3009
David Hildenbrand4287f242016-02-15 09:40:12 +01003010/* set the cpu timer - may only be called from the VCPU thread itself */
3011void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3012{
David Hildenbranddb0758b2016-02-15 09:42:25 +01003013 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01003014 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003015 if (vcpu->arch.cputm_enabled)
3016 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01003017 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003018 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003019 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01003020}
3021
David Hildenbranddb0758b2016-02-15 09:42:25 +01003022/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01003023__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3024{
David Hildenbrand9c23a132016-02-17 21:53:33 +01003025 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01003026 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01003027
3028 if (unlikely(!vcpu->arch.cputm_enabled))
3029 return vcpu->arch.sie_block->cputm;
3030
David Hildenbrand9c23a132016-02-17 21:53:33 +01003031 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3032 do {
3033 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3034 /*
3035 * If the writer ever executed a read in the critical
3036 * section, e.g. in irq context, we would have a deadlock.
3037 */
3038 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3039 value = vcpu->arch.sie_block->cputm;
3040 /* if cputm_start is 0, accounting is being started/stopped */
3041 if (likely(vcpu->arch.cputm_start))
3042 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3043 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3044 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003045 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01003046}
3047
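/*
 * Reader/writer picture for cputm_seqcount as used above: the VCPU thread
 * is the only writer, so remote readers retry instead of taking a lock.
 *
 *	writer (VCPU thread only)	reader (any thread)
 *	-------------------------	-------------------
 *	raw_write_seqcount_begin()	do {
 *	update cputm/cputm_start		seq = raw_read_seqcount()
 *	raw_write_seqcount_end()		snapshot and adjust value
 *					} while (read_seqcount_retry(...))
 */
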
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003048void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3049{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003050
David Hildenbrand37d9df92015-03-11 16:47:33 +01003051 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003052 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01003053 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003054 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01003055 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003056}
3057
3058void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3059{
David Hildenbrand01a745a2016-02-12 20:41:56 +01003060 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01003061 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003062 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003063 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01003064 vcpu->arch.enabled_gmap = gmap_get_enabled();
3065 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003066
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003067}
3068
Dominik Dingel31928aa2014-12-04 15:47:07 +01003069void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003070{
Jason J. Herne72f25022014-11-25 09:46:02 -05003071 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02003072 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003073 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01003074 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02003075 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003076 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02003077 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01003078 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02003079 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02003080 }
David Hildenbrand6502a342016-06-21 14:19:51 +02003081 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3082 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01003083 /* make vcpu_load load the right gmap on the first trigger */
3084 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003085}
3086
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003087static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3088{
3089 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3090 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3091 return true;
3092 return false;
3093}
3094
3095static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3096{
3097 /* At least one ECC subfunction must be present */
3098 return kvm_has_pckmo_subfunc(kvm, 32) ||
3099 kvm_has_pckmo_subfunc(kvm, 33) ||
3100 kvm_has_pckmo_subfunc(kvm, 34) ||
3101 kvm_has_pckmo_subfunc(kvm, 40) ||
3102 kvm_has_pckmo_subfunc(kvm, 41);
3103
3104}
3105
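/*
 * For reference (assumed mapping, per the CPACF PCKMO function codes):
 * bits 32-34 are the ECC-P256/P384/P521 key functions and bits 40-41 the
 * Ed25519/Ed448 ones checked above.
 */
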
Tony Krowiak5102ee82014-06-27 14:46:01 -04003106static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3107{
Tony Krowiake585b242018-09-25 19:16:18 -04003108 /*
3109 * If the AP instructions are not being interpreted and the MSAX3
3110 * facility is not configured for the guest, there is nothing to set up.
3111 */
3112 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04003113 return;
3114
Tony Krowiake585b242018-09-25 19:16:18 -04003115 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02003116 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04003117 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003118 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
Tony Krowiaka374e892014-09-03 10:13:53 +02003119
Tony Krowiake585b242018-09-25 19:16:18 -04003120 if (vcpu->kvm->arch.crypto.apie)
3121 vcpu->arch.sie_block->eca |= ECA_APIE;
3122
3123 /* Set up protected key support */
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003124 if (vcpu->kvm->arch.crypto.aes_kw) {
Tony Krowiaka374e892014-09-03 10:13:53 +02003125 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003126 /* ecc keys are also wrapped with the AES wrapping key */
3127 if (kvm_has_pckmo_ecc(vcpu->kvm))
3128 vcpu->arch.sie_block->ecd |= ECD_ECC;
3129 }
3130
Tony Krowiaka374e892014-09-03 10:13:53 +02003131 if (vcpu->kvm->arch.crypto.dea_kw)
3132 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04003133}
3134
Dominik Dingelb31605c2014-03-25 13:47:11 +01003135void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3136{
3137 free_page(vcpu->arch.sie_block->cbrlo);
3138 vcpu->arch.sie_block->cbrlo = 0;
3139}
3140
3141int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3142{
Christian Borntraegerc4196212020-11-06 08:34:23 +01003143 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT);
Dominik Dingelb31605c2014-03-25 13:47:11 +01003144 if (!vcpu->arch.sie_block->cbrlo)
3145 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01003146 return 0;
3147}
3148
Michael Mueller91520f12015-02-27 14:32:11 +01003149static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3150{
3151 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3152
Michael Mueller91520f12015-02-27 14:32:11 +01003153 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01003154 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01003155 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01003156}
3157
Sean Christophersonff72bb52019-12-18 13:55:20 -08003158static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3159{
Dominik Dingelb31605c2014-03-25 13:47:11 +01003160 int rc = 0;
Janosch Frank29b40f12019-09-30 04:19:18 -04003161 u16 uvrc, uvrrc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003162
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01003163 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3164 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003165 CPUSTAT_STOPPED);
3166
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003167 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003168 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003169 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003170 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003171
Michael Mueller91520f12015-02-27 14:32:11 +01003172 kvm_s390_vcpu_setup_model(vcpu);
3173
David Hildenbrandbdab09f2016-04-12 11:07:49 +02003174 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3175 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003176 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01003177 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003178 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02003179 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003180 vcpu->arch.sie_block->ecb |= ECB_TE;
Janis Schoetterl-Glausch7119dec2021-06-29 10:55:30 +02003181 if (!kvm_is_ucontrol(vcpu->kvm))
3182 vcpu->arch.sie_block->ecb |= ECB_SPECI;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003183
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003184 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003185 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02003186 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003187 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3188 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02003189 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003190 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02003191 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003192 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003193 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003194 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003195 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003196 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01003197 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003198 vcpu->arch.sie_block->eca |= ECA_VX;
3199 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04003200 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003201 if (test_kvm_facility(vcpu->kvm, 139))
3202 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003203 if (test_kvm_facility(vcpu->kvm, 156))
3204 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02003205 if (vcpu->arch.sie_block->gd) {
3206 vcpu->arch.sie_block->eca |= ECA_AIV;
3207 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3208 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3209 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003210 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3211 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08003212 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05003213
3214 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003215 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05003216 else
3217 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05003218
Dominik Dingele6db1d62015-05-07 15:41:57 +02003219 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01003220 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3221 if (rc)
3222 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003223 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01003224 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02003225 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01003226
Collin Walling67d49d52018-08-31 12:51:19 -04003227 vcpu->arch.sie_block->hpid = HPID_KVM;
3228
Tony Krowiak5102ee82014-06-27 14:46:01 -04003229 kvm_s390_vcpu_crypto_setup(vcpu);
3230
Janosch Frank29b40f12019-09-30 04:19:18 -04003231 mutex_lock(&vcpu->kvm->lock);
3232 if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3233 rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3234 if (rc)
3235 kvm_s390_vcpu_unsetup_cmma(vcpu);
3236 }
3237 mutex_unlock(&vcpu->kvm->lock);
3238
Dominik Dingelb31605c2014-03-25 13:47:11 +01003239 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003240}
3241
Sean Christopherson897cc382019-12-18 13:55:09 -08003242int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3243{
3244 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3245 return -EINVAL;
3246 return 0;
3247}
3248
Sean Christophersone529ef62019-12-18 13:55:15 -08003249int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003250{
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003251 struct sie_page *sie_page;
Sean Christopherson897cc382019-12-18 13:55:09 -08003252 int rc;
Carsten Otte4d475552011-10-18 12:27:12 +02003253
QingFeng Haoda72ca42017-06-07 11:41:19 +02003254 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Christian Borntraegerc4196212020-11-06 08:34:23 +01003255 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003256 if (!sie_page)
Sean Christophersone529ef62019-12-18 13:55:15 -08003257 return -ENOMEM;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003258
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003259 vcpu->arch.sie_block = &sie_page->sie_block;
3260 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3261
David Hildenbrandefed1102015-04-16 12:32:41 +02003262 /* the real guest size will always be smaller than msl */
3263 vcpu->arch.sie_block->mso = 0;
3264 vcpu->arch.sie_block->msl = sclp.hamax;
3265
Sean Christophersone529ef62019-12-18 13:55:15 -08003266 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003267 spin_lock_init(&vcpu->arch.local_int.lock);
Sean Christophersone529ef62019-12-18 13:55:15 -08003268 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02003269 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3270 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003271 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01003272
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003273 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3274 kvm_clear_async_pf_completion_queue(vcpu);
3275 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3276 KVM_SYNC_GPRS |
3277 KVM_SYNC_ACRS |
3278 KVM_SYNC_CRS |
3279 KVM_SYNC_ARCH0 |
Collin Walling23a60f82020-06-22 11:46:36 -04003280 KVM_SYNC_PFAULT |
3281 KVM_SYNC_DIAG318;
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003282 kvm_s390_set_prefix(vcpu, 0);
3283 if (test_kvm_facility(vcpu->kvm, 64))
3284 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3285 if (test_kvm_facility(vcpu->kvm, 82))
3286 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3287 if (test_kvm_facility(vcpu->kvm, 133))
3288 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3289 if (test_kvm_facility(vcpu->kvm, 156))
3290 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3291 /* fprs can be synchronized via vrs, even if the guest has no vx. With
3292 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3293 */
3294 if (MACHINE_HAS_VX)
3295 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3296 else
3297 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3298
3299 if (kvm_is_ucontrol(vcpu->kvm)) {
3300 rc = __kvm_ucontrol_vcpu_init(vcpu);
3301 if (rc)
Sean Christophersona2017f12019-12-18 13:55:11 -08003302 goto out_free_sie_block;
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003303 }
3304
Sean Christophersone529ef62019-12-18 13:55:15 -08003305 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3306 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3307 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003308
Sean Christophersonff72bb52019-12-18 13:55:20 -08003309 rc = kvm_s390_vcpu_setup(vcpu);
3310 if (rc)
3311 goto out_ucontrol_uninit;
Sean Christophersone529ef62019-12-18 13:55:15 -08003312 return 0;
3313
Sean Christophersonff72bb52019-12-18 13:55:20 -08003314out_ucontrol_uninit:
3315 if (kvm_is_ucontrol(vcpu->kvm))
3316 gmap_remove(vcpu->arch.gmap);
Wei Yongjun7b06bf22010-03-09 14:37:53 +08003317out_free_sie_block:
3318 free_page((unsigned long)(vcpu->arch.sie_block));
Sean Christophersone529ef62019-12-18 13:55:15 -08003319 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003320}
3321
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003322int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3323{
Halil Pasic9b57e9d2021-10-19 19:53:59 +02003324 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
David Hildenbrand9a022062014-08-05 17:40:47 +02003325 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003326}
3327
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003328bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3329{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003330 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003331}
3332
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003333void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003334{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003335 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003336 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003337}
3338
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003339void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003340{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003341 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003342}
3343
Christian Borntraeger8e236542015-04-09 13:49:04 +02003344static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3345{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003346 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003347 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003348}
3349
David Hildenbrand9ea59722018-09-25 19:16:16 -04003350bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3351{
3352 return atomic_read(&vcpu->arch.sie_block->prog20) &
3353 (PROG_BLOCK_SIE | PROG_REQUEST);
3354}
3355
Christian Borntraeger8e236542015-04-09 13:49:04 +02003356static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3357{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003358 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003359}
3360
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003361/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003362 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003363 * If the CPU is not running (e.g. waiting as idle), the function will
3364 * return immediately. */
3365void exit_sie(struct kvm_vcpu *vcpu)
3366{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003367 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003368 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003369 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3370 cpu_relax();
3371}
3372
Christian Borntraeger8e236542015-04-09 13:49:04 +02003373/* Kick a guest cpu out of SIE to process a request synchronously */
3374void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003375{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003376 kvm_make_request(req, vcpu);
3377 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003378}
3379
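/*
 * Usage pattern: a request bit is paired with an exit_sie() so the target
 * vcpu leaves SIE and processes the request before re-entering, e.g.
 *
 *	kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
 *
 * as done by the gmap notifier below when a prefix page is invalidated.
 */
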
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003380static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3381 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003382{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003383 struct kvm *kvm = gmap->private;
3384 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003385 unsigned long prefix;
Marc Zyngier46808a42021-11-16 16:04:02 +00003386 unsigned long i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003387
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02003388 if (gmap_is_shadow(gmap))
3389 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003390 if (start >= 1UL << 31)
3391 /* We are only interested in prefix pages */
3392 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003393 kvm_for_each_vcpu(i, vcpu, kvm) {
3394 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003395 prefix = kvm_s390_get_prefix(vcpu);
3396 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3397 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3398 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003399 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003400 }
3401 }
3402}
3403
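/*
 * Spelled out: a vcpu's prefix area covers the two pages
 * [prefix, prefix + 2 * PAGE_SIZE - 1], so the notifier above reloads the
 * MMU whenever that range overlaps the invalidated range [start, end].
 */
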
Christian Borntraeger8b905d22019-03-05 05:30:02 -05003404bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3405{
3406 /* do not poll with more than halt_poll_max_steal percent of steal time */
3407 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3408 halt_poll_max_steal) {
3409 vcpu->stat.halt_no_poll_steal++;
3410 return true;
3411 }
3412 return false;
3413}
3414
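/*
 * Worked reading of the check above: avg_steal_timer is in TOD-clock
 * units and TICK_USEC << 12 is one timer tick in the same units, so the
 * left-hand side is the average steal time as a percentage of a tick.
 * With the module parameter's default of 10, polling is skipped once
 * more than roughly 10% of cpu time is stolen.
 */
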
Christoffer Dallb6d33832012-03-08 16:44:24 -05003415int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3416{
3417 /* kvm common code refers to this, but never calls it */
3418 BUG();
3419 return 0;
3420}
3421
Carsten Otte14eebd92012-05-15 14:15:26 +02003422static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3423 struct kvm_one_reg *reg)
3424{
3425 int r = -EINVAL;
3426
3427 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003428 case KVM_REG_S390_TODPR:
3429 r = put_user(vcpu->arch.sie_block->todpr,
3430 (u32 __user *)reg->addr);
3431 break;
3432 case KVM_REG_S390_EPOCHDIFF:
3433 r = put_user(vcpu->arch.sie_block->epoch,
3434 (u64 __user *)reg->addr);
3435 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003436 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003437 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02003438 (u64 __user *)reg->addr);
3439 break;
3440 case KVM_REG_S390_CLOCK_COMP:
3441 r = put_user(vcpu->arch.sie_block->ckc,
3442 (u64 __user *)reg->addr);
3443 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003444 case KVM_REG_S390_PFTOKEN:
3445 r = put_user(vcpu->arch.pfault_token,
3446 (u64 __user *)reg->addr);
3447 break;
3448 case KVM_REG_S390_PFCOMPARE:
3449 r = put_user(vcpu->arch.pfault_compare,
3450 (u64 __user *)reg->addr);
3451 break;
3452 case KVM_REG_S390_PFSELECT:
3453 r = put_user(vcpu->arch.pfault_select,
3454 (u64 __user *)reg->addr);
3455 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003456 case KVM_REG_S390_PP:
3457 r = put_user(vcpu->arch.sie_block->pp,
3458 (u64 __user *)reg->addr);
3459 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003460 case KVM_REG_S390_GBEA:
3461 r = put_user(vcpu->arch.sie_block->gbea,
3462 (u64 __user *)reg->addr);
3463 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003464 default:
3465 break;
3466 }
3467
3468 return r;
3469}
3470
3471static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3472 struct kvm_one_reg *reg)
3473{
3474 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01003475 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02003476
3477 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003478 case KVM_REG_S390_TODPR:
3479 r = get_user(vcpu->arch.sie_block->todpr,
3480 (u32 __user *)reg->addr);
3481 break;
3482 case KVM_REG_S390_EPOCHDIFF:
3483 r = get_user(vcpu->arch.sie_block->epoch,
3484 (u64 __user *)reg->addr);
3485 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003486 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003487 r = get_user(val, (u64 __user *)reg->addr);
3488 if (!r)
3489 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02003490 break;
3491 case KVM_REG_S390_CLOCK_COMP:
3492 r = get_user(vcpu->arch.sie_block->ckc,
3493 (u64 __user *)reg->addr);
3494 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003495 case KVM_REG_S390_PFTOKEN:
3496 r = get_user(vcpu->arch.pfault_token,
3497 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003498 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3499 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02003500 break;
3501 case KVM_REG_S390_PFCOMPARE:
3502 r = get_user(vcpu->arch.pfault_compare,
3503 (u64 __user *)reg->addr);
3504 break;
3505 case KVM_REG_S390_PFSELECT:
3506 r = get_user(vcpu->arch.pfault_select,
3507 (u64 __user *)reg->addr);
3508 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003509 case KVM_REG_S390_PP:
3510 r = get_user(vcpu->arch.sie_block->pp,
3511 (u64 __user *)reg->addr);
3512 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003513 case KVM_REG_S390_GBEA:
3514 r = get_user(vcpu->arch.sie_block->gbea,
3515 (u64 __user *)reg->addr);
3516 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003517 default:
3518 break;
3519 }
3520
3521 return r;
3522}
Christoffer Dallb6d33832012-03-08 16:44:24 -05003523
Janosch Frank7de3f142020-01-31 05:02:02 -05003524static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003525{
Janosch Frank7de3f142020-01-31 05:02:02 -05003526 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
3527 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3528 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
3529
3530 kvm_clear_async_pf_completion_queue(vcpu);
3531 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
3532 kvm_s390_vcpu_stop(vcpu);
3533 kvm_s390_clear_local_irqs(vcpu);
3534}
3535
3536static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3537{
3538 /* Initial reset is a superset of the normal reset */
3539 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
3540
Christian Borntraegere93fc7b2020-03-03 03:10:57 -05003541 /*
3542 * This equals the initial cpu reset in the PoP, but we don't switch to ESA.
3543 * We reset not only the internal data but also ...
3544 */
Janosch Frank7de3f142020-01-31 05:02:02 -05003545 vcpu->arch.sie_block->gpsw.mask = 0;
3546 vcpu->arch.sie_block->gpsw.addr = 0;
3547 kvm_s390_set_prefix(vcpu, 0);
3548 kvm_s390_set_cpu_timer(vcpu, 0);
3549 vcpu->arch.sie_block->ckc = 0;
Janosch Frank7de3f142020-01-31 05:02:02 -05003550 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
3551 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
3552 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
Christian Borntraegere93fc7b2020-03-03 03:10:57 -05003553
3554 /* ... the data in sync regs */
3555 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
3556 vcpu->run->s.regs.ckc = 0;
3557 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
3558 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
3559 vcpu->run->psw_addr = 0;
3560 vcpu->run->psw_mask = 0;
3561 vcpu->run->s.regs.todpr = 0;
3562 vcpu->run->s.regs.cputm = 0;
3563 vcpu->run->s.regs.ckc = 0;
3564 vcpu->run->s.regs.pp = 0;
3565 vcpu->run->s.regs.gbea = 1;
Janosch Frank7de3f142020-01-31 05:02:02 -05003566 vcpu->run->s.regs.fpc = 0;
Janosch Frank0f303502020-02-10 04:27:47 -05003567 /*
3568 * Do not reset these registers in the protected case, as some of
3569 * them are overlaid and they are not accessible in this case
3570 * anyway.
3571 */
3572 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
3573 vcpu->arch.sie_block->gbea = 1;
3574 vcpu->arch.sie_block->pp = 0;
3575 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3576 vcpu->arch.sie_block->todpr = 0;
3577 }
Janosch Frank7de3f142020-01-31 05:02:02 -05003578}
3579
3580static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
3581{
3582 struct kvm_sync_regs *regs = &vcpu->run->s.regs;
3583
3584 /* Clear reset is a superset of the initial reset */
3585 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3586
3587 memset(&regs->gprs, 0, sizeof(regs->gprs));
3588 memset(&regs->vrs, 0, sizeof(regs->vrs));
3589 memset(&regs->acrs, 0, sizeof(regs->acrs));
3590 memset(&regs->gscb, 0, sizeof(regs->gscb));
3591
3592 regs->etoken = 0;
3593 regs->etoken_extension = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003594}
3595
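/*
 * The three resets form a strict hierarchy mirroring the architected CPU
 * resets: normal is a subset of initial, which is a subset of clear; each
 * handler first invokes the weaker reset and then clears additional state.
 */
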
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

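/*
 * An invalid FPC is rejected up front; with the vector facility the
 * floating point registers are kept in the leftmost halves of the vector
 * registers, so they are converted on the fly.
 */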
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int ret = 0;

	vcpu_load(vcpu);

	if (test_fp_ctl(fpu->fpc)) {
		ret = -EINVAL;
		goto out;
	}
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

out:
	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

	vcpu_put(vcpu);
	return 0;
}

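/* The initial PSW can only be replaced while the vcpu is stopped. */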
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu_load(vcpu);

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
		rc = -EINVAL;
		goto out;
	}
	if (!sclp.has_gpere) {
		rc = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
	}

out:
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret;

	vcpu_load(vcpu);

	/* CHECK_STOP and LOAD are not supported yet */
	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				      KVM_MP_STATE_OPERATING;

	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	vcpu_load(vcpu);

	/* user space knows about this interface - let it control the state */
	kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		rc = kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		rc = kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
			rc = -ENXIO;
			break;
		}
		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
		break;
	case KVM_MP_STATE_CHECK_STOP:
		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	vcpu_put(vcpu);
	return rc;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
}

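/*
 * Process all pending requests before entering SIE; every handled request
 * restarts the scan so that no request raised in the meantime is missed.
 */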
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!kvm_request_pending(vcpu))
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMM virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMM virtualization if CMMA is available and
		 * CMM has been used.
		 */
		if ((vcpu->kvm->arch.use_cmma) &&
		    (vcpu->kvm->mm->context.uses_cmm))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}

	/* nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	/* we left the vsie handler, nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);

	return 0;
}

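/*
 * Set the guest TOD clock for the whole VM: the epoch (and, with the
 * multiple-epoch facility, the epoch index) holds the delta between guest
 * and host TOD and is propagated to all vcpus while they are blocked.
 */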
void kvm_s390_set_tod_clock(struct kvm *kvm,
			    const struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_vcpu *vcpu;
	union tod_clock clk;
	unsigned long i;

	mutex_lock(&kvm->lock);
	preempt_disable();

	store_tod_clock_ext(&clk);

	kvm->arch.epoch = gtod->tod - clk.tod;
	kvm->arch.epdx = 0;
	if (test_kvm_facility(kvm, 139)) {
		kvm->arch.epdx = gtod->epoch_idx - clk.ei;
		if (kvm->arch.epoch > gtod->tod)
			kvm->arch.epdx -= 1;
	}

	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		vcpu->arch.sie_block->epdx = kvm->arch.epdx;
	}

	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);

	return true;
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
	return true;
}

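/*
 * An async page fault may only be set up if the guest has enabled the
 * pfault interface and can currently take the notification interrupt;
 * otherwise the fault must be resolved synchronously.
 */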
static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return false;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return false;
	if (psw_extint_disabled(vcpu))
		return false;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return false;
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		return false;
	if (!vcpu->arch.gmap->pfault_enabled)
		return false;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return false;

	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
}

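/*
 * Prepare entry into SIE: complete pending pfault work, deliver pending
 * interrupts and handle requests; a non-zero return value aborts the
 * guest entry.
 */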
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	struct mcck_volatile_info *mcck_info;
	struct sie_page *sie_page;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (exit_reason == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		vcpu->stat.pfault_sync++;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
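/*
 * The main run loop: re-enter SIE until a signal is pending, a guest debug
 * exit is requested or pre/post handling reports an error. For protected
 * guests the general purpose registers are staged through the SIE page
 * around each entry.
 */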
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;
	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(sie_page->pv_grregs,
			       vcpu->run->s.regs.gprs,
			       sizeof(sie_page->pv_grregs));
		}
		if (test_cpu_flag(CIF_FPU))
			load_fpu_regs();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(vcpu->run->s.regs.gprs,
			       sie_page->pv_grregs,
			       sizeof(sie_page->pv_grregs));
			/*
			 * We're not allowed to inject interrupts on intercepts
			 * that leave the guest state in an "in-between" state
			 * where the next SIE entry will do a continuation.
			 * Fence interrupts in our "internal" PSW.
			 */
			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
			}
		}
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
		VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
	/* SIE will load etoken directly from SDNX and therefore kvm_run */
}

static void sync_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	/* Sync fmt2 only data */
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
		sync_regs_fmt2(vcpu);
	} else {
		/*
		 * In several places we have to modify our internal view to
		 * not do things that are disallowed by the ultravisor. For
		 * example we must not inject interrupts after specific exits
		 * (e.g. 112 prefix page not secure). We do this by turning
		 * off the machine check, external and I/O interrupt bits
		 * of our PSW copy. To avoid getting validity intercepts, we
		 * only accept the condition code from userspace.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
						   PSW_MASK_CC;
	}

	kvm_run->kvm_dirty_regs = 0;
}

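/*
 * Counterpart of sync_regs_fmt2: copy the format-2 state back into the
 * kvm_run sync regs area and restore the host guarded storage setup.
 */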
static void store_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		if (!vcpu->arch.host_gscb)
			__ctl_clear_bit(2, 4);
		vcpu->arch.host_gscb = NULL;
		preempt_enable();
	}
	/* SIE will save etoken directly into SDNX and therefore kvm_run */
}

static void store_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
		store_regs_fmt2(vcpu);
}

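/*
 * The KVM_RUN ioctl: validate the requested sync regs, sync the register
 * state in, run the vcpu and store the resulting state back for userspace.
 */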
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	int rc;

	if (kvm_run->immediate_exit)
		return -EINTR;

	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
		return -EINVAL;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	/*
	 * no need to check the return value of vcpu_start as it can only have
	 * an error for protvirt, but protvirt means user cpu state
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	sync_regs(vcpu);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

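/*
 * The IBS facility is used to speed up a guest while only a single vcpu
 * is running; the helpers below toggle it via requests, so the change
 * becomes effective at the next SIE entry.
 */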
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

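/*
 * Move a vcpu from the STOPPED to the OPERATING state. For protected
 * guests the ultravisor is informed first; IBS is enabled while this is
 * the only running vcpu and disabled on all vcpus once a second one
 * starts.
 */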
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the operating state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
	/*
	 * The real PSW might have changed due to a RESTART interpreted by the
	 * ultravisor. We block all interrupts and let the next sie exit
	 * refresh our view.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}

int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the stopped state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);

		if (!is_vcpu_stopped(tmp)) {
			started_vcpus++;
			started_vcpu = tmp;
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

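/*
 * Copy data between userspace and the SIDA of a protected guest; the
 * offset is checked against the SIDA size, including the overflow check
 * on the addition.
 */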
static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
				   struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	int r = 0;

	if (mop->flags || !mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset < mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
		return -E2BIG;

	switch (mop->op) {
	case KVM_S390_MEMOP_SIDA_READ:
		if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
						 mop->sida_offset), mop->size))
			r = -EFAULT;

		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
					    mop->sida_offset), uaddr, mop->size))
			r = -EFAULT;
		break;
	}
	return r;
}
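
/*
 * Read or write guest logical memory on behalf of userspace, honouring
 * the access register number and the optional check-only and
 * exception-injection flags; not available for protected guests.
 */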
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r = 0;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	}

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu,
				      struct kvm_s390_mem_op *mop)
{
	int r, srcu_idx;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		r = kvm_s390_guest_mem_op(vcpu, mop);
		break;
	case KVM_S390_MEMOP_SIDA_READ:
	case KVM_S390_MEMOP_SIDA_WRITE:
		/* we are locked against sida going away by the vcpu->mutex */
		r = kvm_s390_guest_sida_op(vcpu, mop);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	return r;
}

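/*
 * Interrupt injection is handled in the async ioctl path, i.e. without
 * taking the vcpu mutex, so a vcpu running in SIE can be signalled
 * without waiting for it to exit.
 */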
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

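/*
 * Illustrative sketch, not part of the build: injecting a restart
 * interrupt from userspace through the async path above. KVM_S390_IRQ and
 * struct kvm_s390_irq come from <linux/kvm.h>; vcpu_fd is a hypothetical
 * name:
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_RESTART,	// carries no payload in .u
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_IRQ, &irq) < 0)
 *		err(1, "KVM_S390_IRQ");
 */
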
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;
	u16 rc, rrc;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_INITIAL,
					  &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_memsida_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

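/*
 * Illustrative sketch, not part of the build: pointing a fresh vcpu at its
 * entry code with KVM_S390_SET_INITIAL_PSW. struct kvm_s390_psw is uapi;
 * the mask value (EA and BA set, i.e. 64-bit addressing) and the entry
 * address are example assumptions:
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000UL,	// 64-bit addressing mode
 *		.addr = 0x10000,		// guest entry point
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw) < 0)
 *		err(1, "KVM_S390_SET_INITIAL_PSW");
 */
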
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

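/*
 * Illustrative sketch, not part of the build: for user-controlled
 * (ucontrol) VMs, the fault handler above backs an mmap() of the vcpu fd
 * at page offset KVM_S390_SIE_PAGE_OFFSET with the SIE control block.
 * vcpu_fd and the use of sysconf() are example assumptions:
 *
 *	long psize = sysconf(_SC_PAGESIZE);
 *	void *sie = mmap(NULL, psize, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * psize);
 *	if (sie == MAP_FAILED)
 *		err(1, "mmap sie block");
 */
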
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	gpa_t size;

	/* When we are protected, we should not change the memory slots */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;

	if (change == KVM_MR_DELETE || change == KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * A few sanity checks. Memory slots have to start and end on a
	 * segment boundary (1 MB). The userland memory backing a slot may
	 * be fragmented into various different vmas, and it is fine to
	 * mmap() and munmap() within the slot at any time after this call.
	 */

	if (new->userspace_addr & 0xffffful)
		return -EINVAL;

	size = new->npages * PAGE_SIZE;
	if (size & 0xffffful)
		return -EINVAL;

	if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}

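/*
 * Illustrative sketch, not part of the build: a memslot layout that passes
 * the 1 MB alignment checks above, registered through the generic
 * KVM_SET_USER_MEMORY_REGION vm ioctl. vm_fd, backing and the 256 MB size
 * are example assumptions; the backing allocation itself must be
 * 1 MB-aligned for userspace_addr to pass the check:
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,			// 1 MB-aligned
 *		.memory_size     = 256UL << 20,		// multiple of 1 MB
 *		.userspace_addr  = (__u64)(uintptr_t)backing,
 *	};
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
 *		err(1, "KVM_SET_USER_MEMORY_REGION");
 */
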
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		fallthrough;
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
				      new->base_gfn * PAGE_SIZE,
				      new->npages * PAGE_SIZE);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

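/*
 * Worked example for the helper below: it extracts the i-th two-bit field
 * of sclp.hmfai, counting from the most-significant bit, and shifts the
 * 48-bit ones-mask right by 16 bits per unit. For i == 0 and a
 * hypothetical hmfai of 0x40000000, (hmfai << 0) >> 30 == 1, so the result
 * is 0x0000ffffffffffffUL >> 16 == 0x00000000ffffffffUL, and only those
 * facility bits of stfle_fac_list[0] survive the AND in kvm_s390_init().
 */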
static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");