// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio)
};
static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
		sizeof(struct kvm_vm_stat) / sizeof(u64));

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};
static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
		sizeof(struct kvm_vcpu_stat) / sizeof(u64));

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");
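/*
 * e.g. "modprobe kvm nested=1" (or kvm.nested=1 on the kernel command
 * line when built in) exposes the SIE interpretation features so that
 * guests can run their own KVM guests via vSIE.
 */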

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");
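/*
 * Background: diag 0x9c is a directed-yield hint from one guest CPU to
 * another. With forwarding enabled (diag9c_forwarding_hz > 0), KVM may
 * pass the yield on to the host CPU backing the target VCPU instead of
 * ignoring it, rate-limited by diag9c_forwarding_hz.
 */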

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

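/*
 * Apply a host TOD clock delta to one SIE control block: adjust the
 * (signed) epoch and, when the multiple-epoch facility is in use
 * (ECD_MEF), carry into the epoch index (epdx) as well.
 */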
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
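/*
 * Worked example (illustrative): if the host TOD jumps forward by delta,
 * -delta is added to scb->epoch. With ECD_MEF set, the epoch index
 * scb->epdx gets the sign extension (-1 for a negative addend) plus one
 * more if the 64-bit addition to scb->epoch wrapped around, so the
 * combined <epdx, epoch> value stays consistent.
 */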

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void *opaque)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

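/*
 * Test one PLO (PERFORM LOCKED OPERATION) function code via the "test
 * bit" query form: bit 0x100 in the function code requests a test
 * instead of an actual operation; condition code 0 means the function
 * is available.
 */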
static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}

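/*
 * Query the subfunctions of a 32-bit opcode: function code 0 (query) in
 * GR0, the query block address in GR1. Used below for SORTL and DFLTCC.
 */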
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto out;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	default:
		r = 0;
	}
	return r;
}

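/*
 * Synchronize the dirty log with the gmap: walk the memslot in
 * segment-sized (_PAGE_ENTRIES pages) steps, collect the per-segment
 * dirty bits from the guest mapping and transfer them into the
 * memslot's dirty bitmap.
 */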
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

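/*
 * Re-initialize the crypto state of all VCPUs: block every VCPU, rebuild
 * each one's crypto control block, and force a VSIE exit so the shadow
 * crycb is recreated with the new settings.
 */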
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

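/*
 * Read the guest view of the TOD clock: host TOD plus the VM's epoch.
 * With the multiple-epoch facility (facility 139) the epoch index is
 * included, carrying one extra if the 64-bit TOD addition wrapped.
 */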
David Hildenbrand33d1b272018-04-27 14:36:13 +02001193static void kvm_s390_get_tod_clock(struct kvm *kvm,
1194 struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001195{
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001196 union tod_clock clk;
Collin L. Walling8fa16962016-07-26 15:29:44 -04001197
1198 preempt_disable();
1199
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001200 store_tod_clock_ext(&clk);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001201
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001202 gtod->tod = clk.tod + kvm->arch.epoch;
David Hildenbrand33d1b272018-04-27 14:36:13 +02001203 gtod->epoch_idx = 0;
1204 if (test_kvm_facility(kvm, 139)) {
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001205 gtod->epoch_idx = clk.ei + kvm->arch.epdx;
1206 if (gtod->tod < clk.tod)
David Hildenbrand33d1b272018-04-27 14:36:13 +02001207 gtod->epoch_idx += 1;
1208 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04001209
1210 preempt_enable();
1211}
1212
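/*
 * Worked example for the carry handling in kvm_s390_get_tod_clock():
 * with a host TOD of 0xff00000000000000 and a guest epoch of
 * 0x0200000000000000, the 64-bit addition wraps to 0x0100000000000000,
 * so the "gtod->tod < clk.tod" test detects the overflow and the carry
 * is propagated into the multiple-epoch index (facility 139).
 */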
static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

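/*
 * Worked example (hypothetical values) for the IBC clamping above:
 * with sclp.ibc = 0x0f020f05, lowest_ibc is 0xf02 and unblocked_ibc is
 * 0xf05; a requested proc->ibc of 0xf07 is lowered to 0xf05, a request
 * of 0xf01 is raised to 0xf02, and anything in between is taken as-is.
 */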
static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
	       sizeof(stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask:  0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist:  0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

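/*
 * Note on the facility arrays copied above: s390 facility bits are
 * numbered starting from the leftmost bit, so facility n is kept in
 * 64-bit word n / 64 at bit position 63 - (n % 64). Facility 139
 * (multiple-epoch), for example, lives in fac_list[2].
 */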
static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO    subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM     subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR  subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA    subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA   subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL  subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

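/*
 * Layout reminder for the key bytes handled by the two functions here:
 * a storage key byte holds ACC (4 bits), F (fetch protection), R
 * (reference) and C (change), leaving the lowest-order bit unused -
 * which is why kvm_s390_set_skeys() below rejects keys with 0x01 set.
 */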
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);
out:
	kvfree(keys);
	return r;
}

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
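/*
 * Worked example: on a 64-bit host KVM_S390_MAX_BIT_DISTANCE is 16.
 * Restarting a result block costs a base address plus a length (two
 * longs of framing), so sending up to 16 clean CMMA values inline is
 * never more expensive than opening a fresh block after them.
 */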

/*
 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
 * address falls in a hole. In that case the index of one of the memslots
 * bordering the hole is returned.
 */
static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return slot;

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (start >= slots->used_slots)
		return slots->used_slots - 1;

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
	}

	return start;
}

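/*
 * The binary search above relies on the memslots array being sorted by
 * base_gfn in descending order, i.e. index 0 holds the highest guest
 * addresses; kvm_s390_get_cmma() below uses memslots[0] for the same
 * reason when computing the end of guest memory.
 */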
static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}

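/*
 * Encoding note for the pgstev extraction above: the per-page guest
 * state sits 24 bits up in the PGSTE, so after the shift the mask 0x43
 * keeps the two usage-state bits (_PGSTE_GPS_USAGE_MASK >> 24 == 0x03)
 * plus the NODAT bit (0x40); kvm_s390_set_cmma_bits() applies the
 * inverse shift when storing values.
 */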
static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
	struct kvm_memory_slot *ms = slots->memslots + slotidx;
	unsigned long ofs = cur_gfn - ms->base_gfn;

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		slotidx--;
		/* If we are above the highest slot, wrap around */
		if (slotidx < 0)
			slotidx = slots->used_slots - 1;

		ms = slots->memslots + slotidx;
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while ((slotidx > 0) && (ofs >= ms->npages)) {
		slotidx--;
		ms = slots->memslots + slotidx;
		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
	}
	return ms->base_gfn + ofs;
}

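/*
 * Wrap-around note: because the memslots array is ordered by descending
 * base_gfn, slotidx-- above moves towards higher guest addresses. Once
 * cur_gfn lies past the end of slot 0 (the top of guest memory), the
 * index wraps to the last slot and the dirty-bit scan continues from
 * the bottom of guest memory.
 */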
static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	if (unlikely(!slots->used_slots))
		return 0;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}

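/*
 * Illustrative userspace sketch (hypothetical; vm_fd, buf, bufsize and
 * consume() are placeholders): during migration the dirty CMMA values
 * are typically drained by calling KVM_S390_GET_CMMA_BITS until nothing
 * remains:
 *
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count = bufsize,
 *		.flags = 0,
 *		.values = (__u64)buf,
 *	};
 *	do {
 *		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) < 0)
 *			break;
 *		consume(buf, log.start_gfn, log.count);
 *		log.start_gfn += log.count;
 *	} while (log.remaining);
 */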
/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (!kvm->mm->context.uses_cmm) {
		mmap_write_lock(kvm->mm);
		kvm->mm->context.uses_cmm = 1;
		mmap_write_unlock(kvm->mm);
	}
out:
	vfree(bits);
	return r;
}

static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
{
	struct kvm_vcpu *vcpu;
	u16 rc, rrc;
	int ret = 0;
	int i;

	/*
	 * We ignore failures and try to destroy as many CPUs as possible.
	 * At the same time we must not free the assigned resources when
	 * this fails, as the ultravisor still has access to that memory.
	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
	 * behind.
	 * We want to return the first failure rc and rrc, though.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
			*rcp = rc;
			*rrcp = rrc;
			ret = -EIO;
		}
		mutex_unlock(&vcpu->mutex);
	}
	return ret;
}

static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int i, r = 0;
	u16 dummy;

	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
		mutex_unlock(&vcpu->mutex);
		if (r)
			break;
	}
	if (r)
		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
	return r;
}

static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
{
	int r = 0;
	u16 dummy;
	void __user *argp = (void __user *)cmd->data;

	switch (cmd->cmd) {
	case KVM_PV_ENABLE: {
		r = -EINVAL;
		if (kvm_s390_pv_is_protected(kvm))
			break;

		/*
		 * FMT 4 SIE needs esca. As we never switch back to bsca from
		 * esca, we need no cleanup in the error cases below.
		 */
		r = sca_switch_to_extended(kvm);
		if (r)
			break;

		mmap_write_lock(current->mm);
		r = gmap_mark_unmergeable();
		mmap_write_unlock(current->mm);
		if (r)
			break;

		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			break;

		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);

		/* we need to block service interrupts from now on */
		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_DISABLE: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
		/*
		 * If a CPU could not be destroyed, destroy VM will also fail.
		 * There is no point in trying to destroy it. Instead return
		 * the rc and rrc from the first CPU that failed destroying.
		 */
		if (r)
			break;
		r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);

		/* no need to block service interrupts any more */
		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_SET_SEC_PARMS: {
		struct kvm_s390_pv_sec_parm parms = {};
		void *hdr;

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = -EFAULT;
		if (copy_from_user(&parms, argp, sizeof(parms)))
			break;

		/* Currently restricted to 8KB */
		r = -EINVAL;
		if (parms.length > PAGE_SIZE * 2)
			break;

		r = -ENOMEM;
		hdr = vmalloc(parms.length);
		if (!hdr)
			break;

		r = -EFAULT;
		if (!copy_from_user(hdr, (void __user *)parms.origin,
				    parms.length))
			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
						      &cmd->rc, &cmd->rrc);

		vfree(hdr);
		break;
	}
	case KVM_PV_UNPACK: {
		struct kvm_s390_pv_unp unp = {};

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
			break;

		r = -EFAULT;
		if (copy_from_user(&unp, argp, sizeof(unp)))
			break;

		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
				       &cmd->rc, &cmd->rrc);
		break;
	}
	case KVM_PV_VERIFY: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
			     cmd->rrc);
		break;
	}
	case KVM_PV_PREP_RESET: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		break;
	}
	case KVM_PV_UNSHARE_ALL: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

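/*
 * Typical ordering of the KVM_PV_* commands handled above when booting
 * a protected guest (see Documentation/virt/kvm/s390-pv-boot.rst):
 * KVM_PV_ENABLE creates the secure configuration, KVM_PV_SET_SEC_PARMS
 * passes the SE header to the ultravisor, one or more KVM_PV_UNPACK
 * calls decrypt the boot image in place, and KVM_PV_VERIFY checks the
 * image before the guest is started.
 */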
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	case KVM_S390_PV_COMMAND: {
		struct kvm_pv_cmd args;

		/* protvirt means user sigp */
		kvm->arch.user_cpu_state_ctrl = 1;
		r = 0;
		if (!is_prot_virt_host()) {
			r = -EINVAL;
			break;
		}
		if (copy_from_user(&args, argp, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		if (args.flags) {
			r = -EINVAL;
			break;
		}
		mutex_lock(&kvm->lock);
		r = kvm_s390_handle_pv(kvm, &args);
		mutex_unlock(&kvm->lock);
		if (copy_to_user(argp, &args, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}

/*
 * The format of the crypto control block (CRYCB) is specified in the 3 low
 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility are installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed.
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
		return;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

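/*
 * Example of the resulting designation: with both MSAX3 and APXA
 * installed, crycbd ends up holding the CRYCB address with
 * CRYCB_FORMAT2 set in its low bits, which in turn makes
 * kvm_arch_crypto_set_masks() below copy into the wide (256-bit)
 * apcb1 masks instead of apcb0.
 */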
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default:	/* Cannot happen */
		break;
	}

	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);

void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	memset(&kvm->arch.crypto.crycb->apcb0, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb0));
	memset(&kvm->arch.crypto.crycb->apcb1, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb1));

	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);

David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002620static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002621{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002622 struct cpuid cpuid;
2623
2624 get_cpu_id(&cpuid);
2625 cpuid.version = 0xff;
2626 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002627}
2628
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002629static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002630{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002631 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002632 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002633
Tony Krowiake585b242018-09-25 19:16:18 -04002634 if (!test_kvm_facility(kvm, 76))
2635 return;
2636
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002637 /* Enable AES/DEA protected key functions by default */
2638 kvm->arch.crypto.aes_kw = 1;
2639 kvm->arch.crypto.dea_kw = 1;
2640 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2641 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2642 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2643 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002644}
2645
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002646static void sca_dispose(struct kvm *kvm)
2647{
2648 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002649 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002650 else
2651 free_page((unsigned long)(kvm->arch.sca));
2652 kvm->arch.sca = NULL;
2653}
2654
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	/* start with basic SCA */
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	mutex_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	mutex_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	kvm->arch.sie_page2->kvm = kvm;
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;

	for (i = 0; i < kvm_s390_fac_size(); i++) {
		kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
					      (kvm_s390_fac_base[i] |
					       kvm_s390_fac_ext[i]);
		kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
					      kvm_s390_fac_base[i];
	}
	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;

	/* we are always in czam mode - even on pre z14 machines */
	set_kvm_facility(kvm->arch.model.fac_mask, 138);
	set_kvm_facility(kvm->arch.model.fac_list, 138);
	/* we emulate STHYI in kvm */
	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);
	if (MACHINE_HAS_TLB_GUEST) {
		set_kvm_facility(kvm->arch.model.fac_mask, 147);
		set_kvm_facility(kvm->arch.model.fac_list, 147);
	}

	if (css_general_characteristics.aiv && test_facility(65))
		set_kvm_facility(kvm->arch.model.fac_mask, 65);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	mutex_init(&kvm->arch.float_int.ais_lock);
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_SIZE_MAX;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.use_pfmfi = sclp.has_pfmfi;
	kvm->arch.use_skf = sclp.has_skey;
	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	if (use_gisa)
		kvm_s390_gisa_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

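/*
 * Illustrative userspace sketch (not part of this file) of the two VM
 * types accepted above; error handling omitted.  A plain KVM_CREATE_VM
 * passes type 0, while a user-controlled VM passes KVM_VM_S390_UCONTROL
 * and additionally needs CAP_SYS_ADMIN plus CONFIG_KVM_S390_UCONTROL:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd  = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 */
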
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	u16 rc, rrc;

	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	/* We cannot hold the vcpu mutex here; we are already dying */
	if (kvm_s390_pv_cpu_get_handle(vcpu))
		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
	free_page((unsigned long)(vcpu->arch.sie_block));
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	u16 rc, rrc;

	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	kvm_s390_gisa_destroy(kvm);
	/*
	 * We are already at the end of life and kvm->lock is not taken.
	 * This is ok as the file descriptor is closed by now and nobody
	 * can mess with the pv state. To avoid lockdep_assert_held from
	 * complaining we do not use kvm_s390_pv_is_protected.
	 */
	if (kvm_s390_pv_get_handle(kvm))
		kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		return;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	if (kvm->arch.use_esca)
		return 0;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

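/*
 * Net effect of sca_can_add_vcpu(), assuming the usual slot counts
 * (KVM_S390_BSCA_CPU_SLOTS == 64, KVM_S390_ESCA_CPU_SLOTS == 248):
 * ids below 64 always fit into the basic SCA; a higher id triggers a
 * one-way switch to the extended SCA, which only succeeds when SCLP
 * reports both the ESCA and the 64-bit-SCAO facilities.
 */
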
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

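/*
 * Worked example for the accounting above (illustrative numbers): the
 * CPU timer counts down, so if SIE was entered with cputm = 1000 and
 * cputm_start = T0, a reader at TOD time T0 + 300 computes
 * 1000 - 300 = 700 without stopping the timer; the seqcount retry in
 * kvm_s390_get_cpu_timer() protects against concurrent start/stop.
 */
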
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	gmap_enable(vcpu->arch.enabled_gmap);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
{
	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
		return true;
	return false;
}

static bool kvm_has_pckmo_ecc(struct kvm *kvm)
{
	/* At least one ECC subfunction must be present */
	return kvm_has_pckmo_subfunc(kvm, 32) ||
	       kvm_has_pckmo_subfunc(kvm, 33) ||
	       kvm_has_pckmo_subfunc(kvm, 34) ||
	       kvm_has_pckmo_subfunc(kvm, 40) ||
	       kvm_has_pckmo_subfunc(kvm, 41);
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	/*
	 * If the AP instructions are not being interpreted and the MSAX3
	 * facility is not configured for the guest, there is nothing to set up.
	 */
	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
	vcpu->arch.sie_block->eca &= ~ECA_APIE;
	vcpu->arch.sie_block->ecd &= ~ECD_ECC;

	if (vcpu->kvm->arch.crypto.apie)
		vcpu->arch.sie_block->eca |= ECA_APIE;

	/* Set up protected key support */
	if (vcpu->kvm->arch.crypto.aes_kw) {
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
		/* ecc is also wrapped with AES key */
		if (kvm_has_pckmo_ecc(vcpu->kvm))
			vcpu->arch.sie_block->ecd |= ECD_ECC;
	}

	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
}

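/*
 * Illustrative userspace sketch: the aes_kw/dea_kw bits consumed above
 * are toggled through the documented KVM_S390_VM_CRYPTO attribute group;
 * error handling omitted.
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CRYPTO,
 *		.attr  = KVM_S390_VM_CRYPTO_ENABLE_AES_KW,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
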
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;
	u16 uvrc, uvrrc;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
	else if (test_kvm_facility(vcpu->kvm, 8))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;

	if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	if (test_kvm_facility(vcpu->kvm, 139))
		vcpu->arch.sie_block->ecd |= ECD_MEF;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
	if (vcpu->arch.sie_block->gd) {
		vcpu->arch.sie_block->eca |= ECA_AIV;
		VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
			   vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
	}
	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
				      | SDNXC;
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;

	if (sclp.has_kss)
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	vcpu->arch.sie_block->hpid = HPID_KVM;

	kvm_s390_vcpu_crypto_setup(vcpu);

	mutex_lock(&vcpu->kvm->lock);
	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
		if (rc)
			kvm_s390_vcpu_unsetup_cmma(vcpu);
	}
	mutex_unlock(&vcpu->kvm->lock);

	return rc;
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		return -EINVAL;
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct sie_page *sie_page;
	int rc;

	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!sie_page)
		return -ENOMEM;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT |
				    KVM_SYNC_DIAG318;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 82))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	if (test_kvm_facility(vcpu->kvm, 156))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm)) {
		rc = __kvm_ucontrol_vcpu_init(vcpu);
		if (rc)
			goto out_free_sie_block;
	}

	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);

	rc = kvm_s390_vcpu_setup(vcpu);
	if (rc)
		goto out_ucontrol_uninit;
	return 0;

out_ucontrol_uninit:
	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
	return rc;
}

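/*
 * Background for the KVM_SYNC_VRS/KVM_SYNC_FPRS choice above: on
 * machines with the vector facility, fprs 0-15 are architecturally the
 * leftmost 64 bits of vrs 0-15, so syncing the vrs implicitly carries
 * the fprs.  convert_fp_to_vx()/convert_vx_to_fp() below rely on the
 * same overlay.
 */
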
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->prog20) &
	       (PROG_BLOCK_SIE | PROG_REQUEST);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	kvm_s390_vsie_kick(vcpu);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

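/*
 * Typical caller pattern (illustrative): queue a request and force the
 * target vcpu out of SIE so it is handled promptly, e.g. as done for
 * prefix remaps by the gmap notifier below:
 *
 *	kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
 */
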
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	/* do not poll with more than halt_poll_max_steal percent of steal time */
	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
	    halt_poll_max_steal) {
		vcpu->stat.halt_no_poll_steal++;
		return true;
	}
	return false;
}

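/*
 * Worked example (illustrative): with TICK_USEC == 10000 (a 10ms user
 * tick) and the module default halt_poll_max_steal == 10, one tick is
 * 10000 << 12 CPU-timer units, so polling is skipped once the average
 * steal time reaches 10% of a tick, i.e. about 1ms stolen per 10ms.
 */
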
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

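/*
 * Illustrative userspace sketch for the one-reg accessors above; error
 * handling omitted.  Writing the invalid pfault token this way also
 * flushes the async-pf completion queue, as implemented in
 * kvm_arch_vcpu_ioctl_set_one_reg():
 *
 *	__u64 token = KVM_S390_PFAULT_TOKEN_INVALID;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_PFTOKEN,
 *		.addr = (__u64)&token,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */
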
static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));

	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	/* Initial reset is a superset of the normal reset */
	kvm_arch_vcpu_ioctl_normal_reset(vcpu);

	/*
	 * This equals initial cpu reset in pop, but we don't switch to ESA.
	 * We do not only reset the internal data, but also ...
	 */
	vcpu->arch.sie_block->gpsw.mask = 0;
	vcpu->arch.sie_block->gpsw.addr = 0;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0;
	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;

	/* ... the data in sync regs */
	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
	vcpu->run->s.regs.ckc = 0;
	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
	vcpu->run->psw_addr = 0;
	vcpu->run->psw_mask = 0;
	vcpu->run->s.regs.todpr = 0;
	vcpu->run->s.regs.cputm = 0;
	vcpu->run->s.regs.pp = 0;
	vcpu->run->s.regs.gbea = 1;
	vcpu->run->s.regs.fpc = 0;
	/*
	 * Do not reset these registers in the protected case, as some of
	 * them are overlaid and they are not accessible in this case
	 * anyway.
	 */
	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
		vcpu->arch.sie_block->gbea = 1;
		vcpu->arch.sie_block->pp = 0;
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->todpr = 0;
	}
}

static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Clear reset is a superset of the initial reset */
	kvm_arch_vcpu_ioctl_initial_reset(vcpu);

	memset(&regs->gprs, 0, sizeof(regs->gprs));
	memset(&regs->vrs, 0, sizeof(regs->vrs));
	memset(&regs->acrs, 0, sizeof(regs->acrs));
	memset(&regs->gscb, 0, sizeof(regs->gscb));

	regs->etoken = 0;
	regs->etoken_extension = 0;
}

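/*
 * The three reset handlers above form strict supersets of one another
 * and back the vcpu ioctls KVM_S390_NORMAL_RESET, KVM_S390_INITIAL_RESET
 * and KVM_S390_CLEAR_RESET, dispatched elsewhere in this file.
 */
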
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int ret = 0;

	vcpu_load(vcpu);

	if (test_fp_ctl(fpu->fpc)) {
		ret = -EINVAL;
		goto out;
	}
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

out:
	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

	vcpu_put(vcpu);
	return 0;
}

3694static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3695{
3696 int rc = 0;
3697
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003698	if (!is_vcpu_stopped(vcpu)) {
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003699		rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003700	} else {
3701 vcpu->run->psw_mask = psw.mask;
3702 vcpu->run->psw_addr = psw.addr;
3703 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003704 return rc;
3705}
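/*
 * Illustrative sketch (vcpu_fd and entry are assumed to be defined by
 * the caller): userspace loads the initial PSW while the vcpu is
 * stopped, otherwise the ioctl fails with -EBUSY as above. The mask
 * below merely sets the EA/BA bits for 64-bit addressing mode.
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000UL,
 *		.addr = entry,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */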
3706
3707int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3708 struct kvm_translation *tr)
3709{
3710 return -EINVAL; /* not implemented yet */
3711}
3712
David Hildenbrand27291e22014-01-23 12:26:52 +01003713#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3714 KVM_GUESTDBG_USE_HW_BP | \
3715 KVM_GUESTDBG_ENABLE)
3716
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003717int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3718 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003719{
David Hildenbrand27291e22014-01-23 12:26:52 +01003720 int rc = 0;
3721
Christoffer Dall66b56562017-12-04 21:35:33 +01003722 vcpu_load(vcpu);
3723
David Hildenbrand27291e22014-01-23 12:26:52 +01003724 vcpu->guest_debug = 0;
3725 kvm_s390_clear_bp_data(vcpu);
3726
Christoffer Dall66b56562017-12-04 21:35:33 +01003727 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3728 rc = -EINVAL;
3729 goto out;
3730 }
3731 if (!sclp.has_gpere) {
3732 rc = -EINVAL;
3733 goto out;
3734 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003735
3736 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3737 vcpu->guest_debug = dbg->control;
3738 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003739 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003740
3741 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3742 rc = kvm_s390_import_bp_data(vcpu, dbg);
3743 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003744 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003745 vcpu->arch.guestdbg.last_bp = 0;
3746 }
3747
3748 if (rc) {
3749 vcpu->guest_debug = 0;
3750 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003751 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003752 }
3753
Christoffer Dall66b56562017-12-04 21:35:33 +01003754out:
3755 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003756 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003757}
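/*
 * Minimal usage sketch (vcpu_fd assumed): single-stepping is enabled
 * with the two flags below; anything outside VALID_GUESTDBG_FLAGS is
 * rejected with -EINVAL above.
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */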
3758
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003759int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3760 struct kvm_mp_state *mp_state)
3761{
Christoffer Dallfd232562017-12-04 21:35:30 +01003762 int ret;
3763
3764 vcpu_load(vcpu);
3765
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003766 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003767 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3768 KVM_MP_STATE_OPERATING;
3769
3770 vcpu_put(vcpu);
3771 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003772}
3773
3774int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3775 struct kvm_mp_state *mp_state)
3776{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003777 int rc = 0;
3778
Christoffer Dalle83dff52017-12-04 21:35:31 +01003779 vcpu_load(vcpu);
3780
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003781 /* user space knows about this interface - let it control the state */
3782 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3783
3784 switch (mp_state->mp_state) {
3785 case KVM_MP_STATE_STOPPED:
Janosch Frankfe28c7862019-05-15 13:24:30 +02003786 rc = kvm_s390_vcpu_stop(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003787 break;
3788 case KVM_MP_STATE_OPERATING:
Janosch Frankfe28c7862019-05-15 13:24:30 +02003789 rc = kvm_s390_vcpu_start(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003790 break;
3791 case KVM_MP_STATE_LOAD:
Janosch Frank7c36a3f2019-09-02 08:34:44 +02003792 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
3793 rc = -ENXIO;
3794 break;
3795 }
3796 rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
3797 break;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003798 case KVM_MP_STATE_CHECK_STOP:
Joe Perches3b684a42020-03-10 21:51:32 -07003799 fallthrough; /* CHECK_STOP and LOAD are not supported yet */
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003800 default:
3801 rc = -ENXIO;
3802 }
3803
Christoffer Dalle83dff52017-12-04 21:35:31 +01003804 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003805 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003806}
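/*
 * Sketch (vcpu_fd assumed): stopping a vcpu from userspace. Note the
 * side effect documented above: any KVM_SET_MP_STATE call hands cpu
 * state control to userspace for the whole VM.
 *
 *	struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_STOPPED };
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
 */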
3807
David Hildenbrand8ad35752014-03-14 11:00:21 +01003808static bool ibs_enabled(struct kvm_vcpu *vcpu)
3809{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003810 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003811}
3812
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003813static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3814{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003815retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003816 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003817 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003818 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003819 /*
3820 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003821 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003822 * This ensures that the ipte instruction for this request has
3823 * already finished. We might race against a second unmapper that
3824	 * wants to set the blocking bit. Let's just retry the request loop.
3825 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003826 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003827 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003828 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3829 kvm_s390_get_prefix(vcpu),
3830 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003831 if (rc) {
3832 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003833 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003834 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003835 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003836 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003837
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003838 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3839 vcpu->arch.sie_block->ihcpu = 0xffff;
3840 goto retry;
3841 }
3842
David Hildenbrand8ad35752014-03-14 11:00:21 +01003843 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3844 if (!ibs_enabled(vcpu)) {
3845 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003846 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003847 }
3848 goto retry;
3849 }
3850
3851 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3852 if (ibs_enabled(vcpu)) {
3853 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003854 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003855 }
3856 goto retry;
3857 }
3858
David Hildenbrand6502a342016-06-21 14:19:51 +02003859 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3860 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3861 goto retry;
3862 }
3863
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003864 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3865 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003866 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003867 * instruction manually, in order to provide additional
3868	 * functionality needed for live migration.
3869 */
3870 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3871 goto retry;
3872 }
3873
3874 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3875 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003876 * Re-enable CMM virtualization if CMMA is available and
3877 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003878 */
3879 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003880 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003881 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3882 goto retry;
3883 }
3884
David Hildenbrand0759d062014-05-13 16:54:32 +02003885 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003886 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003887 /* we left the vsie handler, nothing to do, just clear the request */
3888 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003889
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003890 return 0;
3891}
3892
David Hildenbrand0e7def52018-02-07 12:46:43 +01003893void kvm_s390_set_tod_clock(struct kvm *kvm,
3894 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003895{
3896 struct kvm_vcpu *vcpu;
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003897 union tod_clock clk;
Collin L. Walling8fa16962016-07-26 15:29:44 -04003898 int i;
3899
3900 mutex_lock(&kvm->lock);
3901 preempt_disable();
3902
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003903 store_tod_clock_ext(&clk);
Collin L. Walling8fa16962016-07-26 15:29:44 -04003904
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003905 kvm->arch.epoch = gtod->tod - clk.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003906 kvm->arch.epdx = 0;
3907 if (test_kvm_facility(kvm, 139)) {
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003908 kvm->arch.epdx = gtod->epoch_idx - clk.ei;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003909 if (kvm->arch.epoch > gtod->tod)
3910 kvm->arch.epdx -= 1;
3911 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003912
3913 kvm_s390_vcpu_block_all(kvm);
3914 kvm_for_each_vcpu(i, vcpu, kvm) {
3915 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3916 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3917 }
3918
3919 kvm_s390_vcpu_unblock_all(kvm);
3920 preempt_enable();
3921 mutex_unlock(&kvm->lock);
3922}
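/*
 * Worked example for the epoch arithmetic above (illustrative values):
 * with facility 139 the guest TOD is a 16-byte quantity, so the delta
 * may have to borrow into the epoch index.
 *
 *	host:  clk.tod = 0x1000, clk.ei = 0
 *	guest: gtod->tod = 0x0800, gtod->epoch_idx = 0
 *
 *	epoch = 0x0800 - 0x1000 = 0xfffffffffffff800 (u64 wrap-around)
 *	epdx  = 0 - 0 = 0
 *	epoch > gtod->tod, so epdx -= 1 -> 0xff, i.e. -1: the borrow that
 *	keeps host TOD + (epdx:epoch) equal to the requested guest clock.
 */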
3923
Thomas Huthfa576c52014-05-06 17:20:16 +02003924/**
3925 * kvm_arch_fault_in_page - fault-in guest page if necessary
3926 * @vcpu: The corresponding virtual cpu
3927 * @gpa: Guest physical address
3928 * @writable: Whether the page should be writable or not
3929 *
3930 * Make sure that a guest page has been faulted-in on the host.
3931 *
3932 * Return: Zero on success, negative error code otherwise.
3933 */
3934long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003935{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003936 return gmap_fault(vcpu->arch.gmap, gpa,
3937 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003938}
3939
Dominik Dingel3c038e62013-10-07 17:11:48 +02003940static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3941 unsigned long token)
3942{
3943 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003944 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003945
3946 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003947 irq.u.ext.ext_params2 = token;
3948 irq.type = KVM_S390_INT_PFAULT_INIT;
3949 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003950 } else {
3951 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003952 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003953 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3954 }
3955}
3956
Vitaly Kuznetsov2a18b7e2020-06-10 19:55:32 +02003957bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
Dominik Dingel3c038e62013-10-07 17:11:48 +02003958 struct kvm_async_pf *work)
3959{
3960 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3961 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
Vitaly Kuznetsov2a18b7e2020-06-10 19:55:32 +02003962
3963 return true;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003964}
3965
3966void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3967 struct kvm_async_pf *work)
3968{
3969 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3970 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3971}
3972
3973void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3974 struct kvm_async_pf *work)
3975{
3976 /* s390 will always inject the page directly */
3977}
3978
Vitaly Kuznetsov7c0ade62020-05-25 16:41:18 +02003979bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
Dominik Dingel3c038e62013-10-07 17:11:48 +02003980{
3981 /*
3982 * s390 will always inject the page directly,
3983	 * but we still want kvm_check_async_pf_completion() to do the cleanup
3984 */
3985 return true;
3986}
3987
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02003988static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
Dominik Dingel3c038e62013-10-07 17:11:48 +02003989{
3990 hva_t hva;
3991 struct kvm_arch_async_pf arch;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003992
3993 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02003994 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003995 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3996 vcpu->arch.pfault_compare)
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02003997 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003998 if (psw_extint_disabled(vcpu))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02003999 return false;
David Hildenbrand9a022062014-08-05 17:40:47 +02004000 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004001 return false;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02004002 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004003 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004004 if (!vcpu->arch.gmap->pfault_enabled)
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004005 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004006
Heiko Carstens81480cc2014-01-01 16:36:07 +01004007 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
4008 hva += current->thread.gmap_addr & ~PAGE_MASK;
4009 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004010 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004011
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004012 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
Dominik Dingel3c038e62013-10-07 17:11:48 +02004013}
4014
Thomas Huth3fb4c402013-09-12 10:33:43 +02004015static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004016{
Thomas Huth3fb4c402013-09-12 10:33:43 +02004017 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01004018
Dominik Dingel3c038e62013-10-07 17:11:48 +02004019 /*
4020 * On s390 notifications for arriving pages will be delivered directly
4021	 * to the guest but the housekeeping for completed pfaults is
4022 * handled outside the worker.
4023 */
4024 kvm_check_async_pf_completion(vcpu);
4025
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01004026 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4027 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004028
4029 if (need_resched())
4030 schedule();
4031
Jens Freimann79395032014-04-17 10:10:30 +02004032 if (!kvm_is_ucontrol(vcpu->kvm)) {
4033 rc = kvm_s390_deliver_pending_interrupts(vcpu);
4034 if (rc)
4035 return rc;
4036 }
Carsten Otte0ff31862008-05-21 13:37:37 +02004037
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02004038 rc = kvm_s390_handle_requests(vcpu);
4039 if (rc)
4040 return rc;
4041
David Hildenbrand27291e22014-01-23 12:26:52 +01004042 if (guestdbg_enabled(vcpu)) {
4043 kvm_s390_backup_guest_per_regs(vcpu);
4044 kvm_s390_patch_guest_per_regs(vcpu);
4045 }
4046
Michael Mueller9f30f622019-01-31 09:52:44 +01004047 clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
4048
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004049 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004050 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4051 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4052 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02004053
Thomas Huth3fb4c402013-09-12 10:33:43 +02004054 return 0;
4055}
4056
Thomas Huth492d8642015-02-10 16:11:01 +01004057static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
4058{
David Hildenbrand56317922016-01-12 17:37:58 +01004059 struct kvm_s390_pgm_info pgm_info = {
4060 .code = PGM_ADDRESSING,
4061 };
4062 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01004063 int rc;
4064
4065 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4066 trace_kvm_s390_sie_fault(vcpu);
4067
4068 /*
4069 * We want to inject an addressing exception, which is defined as a
4070 * suppressing or terminating exception. However, since we came here
4071 * by a DAT access exception, the PSW still points to the faulting
4072 * instruction since DAT exceptions are nullifying. So we've got
4073 * to look up the current opcode to get the length of the instruction
4074 * to be able to forward the PSW.
4075 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02004076 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01004077 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01004078 if (rc < 0) {
4079 return rc;
4080 } else if (rc) {
4081 /* Instruction-Fetching Exceptions - we can't detect the ilen.
4082 * Forward by arbitrary ilc, injection will take care of
4083 * nullification if necessary.
4084 */
4085 pgm_info = vcpu->arch.pgm;
4086 ilen = 4;
4087 }
David Hildenbrand56317922016-01-12 17:37:58 +01004088 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4089 kvm_s390_forward_psw(vcpu, ilen);
4090 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01004091}
4092
Thomas Huth3fb4c402013-09-12 10:33:43 +02004093static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4094{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02004095 struct mcck_volatile_info *mcck_info;
4096 struct sie_page *sie_page;
4097
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02004098 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4099 vcpu->arch.sie_block->icptcode);
4100 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4101
David Hildenbrand27291e22014-01-23 12:26:52 +01004102 if (guestdbg_enabled(vcpu))
4103 kvm_s390_restore_guest_per_regs(vcpu);
4104
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01004105 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4106 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004107
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02004108 if (exit_reason == -EINTR) {
4109 VCPU_EVENT(vcpu, 3, "%s", "machine check");
4110 sie_page = container_of(vcpu->arch.sie_block,
4111 struct sie_page, sie_block);
4112 mcck_info = &sie_page->mcck_info;
4113 kvm_s390_reinject_machine_check(vcpu, mcck_info);
4114 return 0;
4115 }
4116
David Hildenbrand71f116b2015-10-19 16:24:28 +02004117 if (vcpu->arch.sie_block->icptcode > 0) {
4118 int rc = kvm_handle_sie_intercept(vcpu);
4119
4120 if (rc != -EOPNOTSUPP)
4121 return rc;
4122 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4123 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4124 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4125 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4126 return -EREMOTE;
4127 } else if (exit_reason != -EFAULT) {
4128 vcpu->stat.exit_null++;
4129 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02004130 } else if (kvm_is_ucontrol(vcpu->kvm)) {
4131 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4132 vcpu->run->s390_ucontrol.trans_exc_code =
4133 current->thread.gmap_addr;
4134 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004135 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004136 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02004137 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004138 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004139 if (kvm_arch_setup_async_pf(vcpu))
4140 return 0;
Christian Borntraeger50a05be2020-11-25 10:06:58 +01004141 vcpu->stat.pfault_sync++;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004142 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004143 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02004144 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004145}
4146
Janosch Frank3adae0b2019-12-13 08:26:06 -05004147#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
Thomas Huth3fb4c402013-09-12 10:33:43 +02004148static int __vcpu_run(struct kvm_vcpu *vcpu)
4149{
4150 int rc, exit_reason;
Janosch Frankc8aac232019-05-08 15:52:00 +02004151 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004152
Thomas Huth800c1062013-09-12 10:33:45 +02004153 /*
4154 * We try to hold kvm->srcu during most of vcpu_run (except when run-
4155 * ning the guest), so that memslots (and other stuff) are protected
4156 */
4157 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4158
Thomas Hutha76ccff2013-09-12 10:33:44 +02004159 do {
4160 rc = vcpu_pre_run(vcpu);
4161 if (rc)
4162 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004163
Thomas Huth800c1062013-09-12 10:33:45 +02004164 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02004165 /*
4166	 * As PF_VCPU will be used in the fault handler, there must be no
4167	 * uaccess between guest_enter and guest_exit.
4168 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02004169 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02004170 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01004171 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02004172 local_irq_enable();
Janosch Frankc8aac232019-05-08 15:52:00 +02004173 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4174 memcpy(sie_page->pv_grregs,
4175 vcpu->run->s.regs.gprs,
4176 sizeof(sie_page->pv_grregs));
4177 }
Sven Schnelle56e62a72020-11-21 11:14:56 +01004178 if (test_cpu_flag(CIF_FPU))
4179 load_fpu_regs();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004180 exit_reason = sie64a(vcpu->arch.sie_block,
4181 vcpu->run->s.regs.gprs);
Janosch Frankc8aac232019-05-08 15:52:00 +02004182 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4183 memcpy(vcpu->run->s.regs.gprs,
4184 sie_page->pv_grregs,
4185 sizeof(sie_page->pv_grregs));
Janosch Frank3adae0b2019-12-13 08:26:06 -05004186 /*
4187 * We're not allowed to inject interrupts on intercepts
4188 * that leave the guest state in an "in-between" state
4189 * where the next SIE entry will do a continuation.
4190 * Fence interrupts in our "internal" PSW.
4191 */
4192 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4193 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4194 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4195 }
Janosch Frankc8aac232019-05-08 15:52:00 +02004196 }
Christian Borntraeger0097d122015-04-30 13:43:30 +02004197 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01004198 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02004199 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02004200 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02004201 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004202
Thomas Hutha76ccff2013-09-12 10:33:44 +02004203 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01004204 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004205
Thomas Huth800c1062013-09-12 10:33:45 +02004206 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01004207 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004208}
4209
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004210static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
David Hildenbrandb028ee32014-07-17 10:47:43 +02004211{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004212 struct kvm_run *kvm_run = vcpu->run;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004213 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004214 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004215
4216 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004217 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02004218 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4219 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
David Hildenbrandb028ee32014-07-17 10:47:43 +02004220 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrandb028ee32014-07-17 10:47:43 +02004221 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4222 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4223 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4224 }
4225 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4226 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4227 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4228 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02004229 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4230 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004231 }
Collin Walling23a60f82020-06-22 11:46:36 -04004232 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
4233 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4234 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
4235 }
Fan Zhang80cd8762016-08-15 04:53:22 +02004236 /*
4237 * If userspace sets the riccb (e.g. after migration) to a valid state,
4238 * we should enable RI here instead of doing the lazy enablement.
4239 */
4240 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004241 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02004242 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01004243 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004244 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01004245 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02004246 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004247 /*
4248 * If userspace sets the gscb (e.g. after migration) to non-zero,
4249 * we should enable GS here instead of doing the lazy enablement.
4250 */
4251 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
4252 test_kvm_facility(vcpu->kvm, 133) &&
4253 gscb->gssm &&
4254 !vcpu->arch.gs_enabled) {
4255 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
4256 vcpu->arch.sie_block->ecb |= ECB_GS;
4257 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4258 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02004259 }
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01004260 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
4261 test_kvm_facility(vcpu->kvm, 82)) {
4262 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4263 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4264 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004265 if (MACHINE_HAS_GS) {
4266 preempt_disable();
4267 __ctl_set_bit(2, 4);
4268 if (current->thread.gs_cb) {
4269 vcpu->arch.host_gscb = current->thread.gs_cb;
4270 save_gs_cb(vcpu->arch.host_gscb);
4271 }
4272 if (vcpu->arch.gs_enabled) {
4273 current->thread.gs_cb = (struct gs_cb *)
4274 &vcpu->run->s.regs.gscb;
4275 restore_gs_cb(current->thread.gs_cb);
4276 }
4277 preempt_enable();
4278 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004279 /* SIE will load etoken directly from SDNX and therefore kvm_run */
Janosch Frank811ea792019-06-14 13:11:21 +02004280}
4281
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004282static void sync_regs(struct kvm_vcpu *vcpu)
Janosch Frank811ea792019-06-14 13:11:21 +02004283{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004284 struct kvm_run *kvm_run = vcpu->run;
4285
Janosch Frank811ea792019-06-14 13:11:21 +02004286 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4287 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4288 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4289 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4290 /* some control register changes require a tlb flush */
4291 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4292 }
4293 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4294 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4295 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4296 }
4297 save_access_regs(vcpu->arch.host_acrs);
4298 restore_access_regs(vcpu->run->s.regs.acrs);
4299 /* save host (userspace) fprs/vrs */
4300 save_fpu_regs();
4301 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4302 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4303 if (MACHINE_HAS_VX)
4304 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
4305 else
4306 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
4307 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
4308 if (test_fp_ctl(current->thread.fpu.fpc))
4309 /* User space provided an invalid FPC, let's clear it */
4310 current->thread.fpu.fpc = 0;
4311
4312 /* Sync fmt2 only data */
4313 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004314 sync_regs_fmt2(vcpu);
Janosch Frank811ea792019-06-14 13:11:21 +02004315 } else {
4316 /*
4317 * In several places we have to modify our internal view to
4318 * not do things that are disallowed by the ultravisor. For
4319 * example we must not inject interrupts after specific exits
4320 * (e.g. 112 prefix page not secure). We do this by turning
4321 * off the machine check, external and I/O interrupt bits
4322 * of our PSW copy. To avoid getting validity intercepts, we
4323	 * only accept the condition code from userspace.
4324 */
4325 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4326 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4327 PSW_MASK_CC;
4328 }
Fan Zhang80cd8762016-08-15 04:53:22 +02004329
David Hildenbrandb028ee32014-07-17 10:47:43 +02004330 kvm_run->kvm_dirty_regs = 0;
4331}
4332
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004333static void store_regs_fmt2(struct kvm_vcpu *vcpu)
David Hildenbrandb028ee32014-07-17 10:47:43 +02004334{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004335 struct kvm_run *kvm_run = vcpu->run;
4336
David Hildenbrandb028ee32014-07-17 10:47:43 +02004337 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4338 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4339 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01004340 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Collin Walling23a60f82020-06-22 11:46:36 -04004341 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004342 if (MACHINE_HAS_GS) {
Heiko Carstens44bada22021-04-15 10:01:27 +02004343 preempt_disable();
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004344 __ctl_set_bit(2, 4);
4345 if (vcpu->arch.gs_enabled)
4346 save_gs_cb(current->thread.gs_cb);
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004347 current->thread.gs_cb = vcpu->arch.host_gscb;
4348 restore_gs_cb(vcpu->arch.host_gscb);
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004349 if (!vcpu->arch.host_gscb)
4350 __ctl_clear_bit(2, 4);
4351 vcpu->arch.host_gscb = NULL;
Heiko Carstens44bada22021-04-15 10:01:27 +02004352 preempt_enable();
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004353 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004354 /* SIE will save etoken directly into SDNX and therefore kvm_run */
David Hildenbrandb028ee32014-07-17 10:47:43 +02004355}
4356
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004357static void store_regs(struct kvm_vcpu *vcpu)
Janosch Frank811ea792019-06-14 13:11:21 +02004358{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004359 struct kvm_run *kvm_run = vcpu->run;
4360
Janosch Frank811ea792019-06-14 13:11:21 +02004361 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4362 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4363 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
4364 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4365 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
4366 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4367 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4368 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4369 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
4370 save_access_regs(vcpu->run->s.regs.acrs);
4371 restore_access_regs(vcpu->arch.host_acrs);
4372 /* Save guest register state */
4373 save_fpu_regs();
4374 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4375 /* Restore will be done lazily at return */
4376 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
4377 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
4378 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004379 store_regs_fmt2(vcpu);
Janosch Frank811ea792019-06-14 13:11:21 +02004380}
4381
Tianjia Zhang1b94f6f2020-04-16 13:10:57 +08004382int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004383{
Tianjia Zhang1b94f6f2020-04-16 13:10:57 +08004384 struct kvm_run *kvm_run = vcpu->run;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004385 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004386
Paolo Bonzini460df4c2017-02-08 11:50:15 +01004387 if (kvm_run->immediate_exit)
4388 return -EINTR;
4389
Thomas Huth200824f2019-09-04 10:51:59 +02004390 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4391 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4392 return -EINVAL;
4393
Christoffer Dallaccb7572017-12-04 21:35:25 +01004394 vcpu_load(vcpu);
4395
David Hildenbrand27291e22014-01-23 12:26:52 +01004396 if (guestdbg_exit_pending(vcpu)) {
4397 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004398 rc = 0;
4399 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01004400 }
4401
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004402 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004403
Janosch Frankfe28c7862019-05-15 13:24:30 +02004404 /*
4405 * no need to check the return value of vcpu_start as it can only have
4406	 * an error for protvirt, but protvirt implies user-controlled cpu state
4407 */
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004408 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4409 kvm_s390_vcpu_start(vcpu);
4410 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004411 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004412 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004413 rc = -EINVAL;
4414 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004415 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004416
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004417 sync_regs(vcpu);
David Hildenbranddb0758b2016-02-15 09:42:25 +01004418 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004419
Heiko Carstensdab4079d2009-06-12 10:26:32 +02004420 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004421 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02004422
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004423 if (signal_pending(current) && !rc) {
4424 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004425 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004426 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004427
David Hildenbrand27291e22014-01-23 12:26:52 +01004428 if (guestdbg_exit_pending(vcpu) && !rc) {
4429 kvm_s390_prepare_debug_exit(vcpu);
4430 rc = 0;
4431 }
4432
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004433 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02004434 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004435 rc = 0;
4436 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004437
David Hildenbranddb0758b2016-02-15 09:42:25 +01004438 disable_cpu_timer_accounting(vcpu);
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004439 store_regs(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004440
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004441 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004442
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004443 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01004444out:
4445 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02004446 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004447}
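/*
 * Illustrative userspace run loop (assumptions: vcpu_fd exists and
 * mmap_size came from KVM_GET_VCPU_MMAP_SIZE; error handling omitted;
 * handle_intercept() is a hypothetical helper):
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *			break;
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_intercept(run);
 *	}
 */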
4448
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004449/*
4450 * store status at address
4451	 * we have two special cases:
4452 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4453 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4454 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01004455int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004456{
Carsten Otte092670c2011-07-24 10:48:22 +02004457 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004458 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02004459 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01004460 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004461 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004462
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004463 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01004464 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4465 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004466 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004467 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004468 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4469 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004470 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004471 gpa = px;
4472 } else
4473 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004474
4475 /* manually convert vector registers if necessary */
4476 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01004477 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004478 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4479 fprs, 128);
4480 } else {
4481 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01004482 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004483 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004484 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004485 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004486 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004487 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004488 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02004489 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004490 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004491 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004492 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004493 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01004494 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004495 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01004496 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01004497 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004498 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004499 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004500 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004501 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004502 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004503 &vcpu->arch.sie_block->gcr, 128);
4504 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004505}
4506
Thomas Huthe8798922013-11-06 15:46:33 +01004507int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4508{
4509 /*
4510 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01004511 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01004512	 * them into the save area
4513 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02004514 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004515 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01004516 save_access_regs(vcpu->run->s.regs.acrs);
4517
4518 return kvm_s390_store_status_unloaded(vcpu, addr);
4519}
4520
David Hildenbrand8ad35752014-03-14 11:00:21 +01004521static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4522{
4523 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004524 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004525}
4526
4527static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
4528{
4529 unsigned int i;
4530 struct kvm_vcpu *vcpu;
4531
4532 kvm_for_each_vcpu(i, vcpu, kvm) {
4533 __disable_ibs_on_vcpu(vcpu);
4534 }
4535}
4536
4537static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4538{
David Hildenbrand09a400e2016-04-04 15:57:08 +02004539 if (!sclp.has_ibs)
4540 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004541 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004542 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004543}
4544
Janosch Frankfe28c7862019-05-15 13:24:30 +02004545int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004546{
Janosch Frankfe28c7862019-05-15 13:24:30 +02004547 int i, online_vcpus, r = 0, started_vcpus = 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004548
4549 if (!is_vcpu_stopped(vcpu))
Janosch Frankfe28c7862019-05-15 13:24:30 +02004550 return 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004551
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004552 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004553 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004554 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004555 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4556
Janosch Frankfe28c7862019-05-15 13:24:30 +02004557 /* Let's tell the UV that we want to change into the operating state */
4558 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4559 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
4560 if (r) {
4561 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4562 return r;
4563 }
4564 }
4565
David Hildenbrand8ad35752014-03-14 11:00:21 +01004566 for (i = 0; i < online_vcpus; i++) {
4567 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
4568 started_vcpus++;
4569 }
4570
4571 if (started_vcpus == 0) {
4572 /* we're the only active VCPU -> speed it up */
4573 __enable_ibs_on_vcpu(vcpu);
4574 } else if (started_vcpus == 1) {
4575 /*
4576 * As we are starting a second VCPU, we have to disable
4577 * the IBS facility on all VCPUs to remove potentially
Bhaskar Chowdhury38860752021-02-13 21:02:27 +05304578 * outstanding ENABLE requests.
David Hildenbrand8ad35752014-03-14 11:00:21 +01004579 */
4580 __disable_ibs_on_all_vcpus(vcpu->kvm);
4581 }
4582
David Hildenbrand9daecfc2018-01-23 18:05:30 +01004583 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004584 /*
Christian Borntraeger72f21822020-01-30 11:18:28 -05004585 * The real PSW might have changed due to a RESTART interpreted by the
4586 * ultravisor. We block all interrupts and let the next sie exit
4587 * refresh our view.
4588 */
4589 if (kvm_s390_pv_cpu_is_protected(vcpu))
4590 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4591 /*
David Hildenbrand8ad35752014-03-14 11:00:21 +01004592 * Another VCPU might have used IBS while we were offline.
4593 * Let's play safe and flush the VCPU at startup.
4594 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02004595 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004596 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
Janosch Frankfe28c7862019-05-15 13:24:30 +02004597 return 0;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004598}
4599
Janosch Frankfe28c7862019-05-15 13:24:30 +02004600int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004601{
Janosch Frankfe28c7862019-05-15 13:24:30 +02004602 int i, online_vcpus, r = 0, started_vcpus = 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004603 struct kvm_vcpu *started_vcpu = NULL;
4604
4605 if (is_vcpu_stopped(vcpu))
Janosch Frankfe28c7862019-05-15 13:24:30 +02004606 return 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004607
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004608 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004609 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004610 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004611 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4612
Janosch Frankfe28c7862019-05-15 13:24:30 +02004613 /* Let's tell the UV that we want to change into the stopped state */
4614 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4615 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
4616 if (r) {
4617 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4618 return r;
4619 }
4620 }
4621
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004622	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02004623 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004624
David Hildenbrandef8f4f42018-01-23 18:05:29 +01004625 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004626 __disable_ibs_on_vcpu(vcpu);
4627
4628 for (i = 0; i < online_vcpus; i++) {
4629 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
4630 started_vcpus++;
4631 started_vcpu = vcpu->kvm->vcpus[i];
4632 }
4633 }
4634
4635 if (started_vcpus == 1) {
4636 /*
4637 * As we only have one VCPU left, we want to enable the
4638 * IBS facility for that VCPU to speed it up.
4639 */
4640 __enable_ibs_on_vcpu(started_vcpu);
4641 }
4642
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004643 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
Janosch Frankfe28c7862019-05-15 13:24:30 +02004644 return 0;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004645}
4646
Cornelia Huckd6712df2012-12-20 15:32:11 +01004647static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4648 struct kvm_enable_cap *cap)
4649{
4650 int r;
4651
4652 if (cap->flags)
4653 return -EINVAL;
4654
4655 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004656 case KVM_CAP_S390_CSS_SUPPORT:
4657 if (!vcpu->kvm->arch.css_support) {
4658 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02004659 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004660 trace_kvm_s390_enable_css(vcpu->kvm);
4661 }
4662 r = 0;
4663 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01004664 default:
4665 r = -EINVAL;
4666 break;
4667 }
4668 return r;
4669}
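/*
 * Sketch (vcpu_fd assumed): enabling the only per-vcpu capability
 * handled above from userspace.
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */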
4670
Janosch Frank19e12272019-04-02 09:21:06 +02004671static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
4672 struct kvm_s390_mem_op *mop)
4673{
4674 void __user *uaddr = (void __user *)mop->buf;
4675 int r = 0;
4676
4677 if (mop->flags || !mop->size)
4678 return -EINVAL;
4679 if (mop->size + mop->sida_offset < mop->size)
4680 return -EINVAL;
4681 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
4682 return -E2BIG;
4683
4684 switch (mop->op) {
4685 case KVM_S390_MEMOP_SIDA_READ:
4686 if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
4687 mop->sida_offset), mop->size))
4688 r = -EFAULT;
4689
4690 break;
4691 case KVM_S390_MEMOP_SIDA_WRITE:
4692 if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
4693 mop->sida_offset), uaddr, mop->size))
4694 r = -EFAULT;
4695 break;
4696 }
4697 return r;
4698}
Thomas Huth41408c282015-02-06 15:01:21 +01004699static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
4700 struct kvm_s390_mem_op *mop)
4701{
4702 void __user *uaddr = (void __user *)mop->buf;
4703 void *tmpbuf = NULL;
Janosch Frank19e12272019-04-02 09:21:06 +02004704 int r = 0;
Thomas Huth41408c282015-02-06 15:01:21 +01004705 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
4706 | KVM_S390_MEMOP_F_CHECK_ONLY;
4707
Thomas Hutha13b03b2019-08-29 14:25:17 +02004708 if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
Thomas Huth41408c282015-02-06 15:01:21 +01004709 return -EINVAL;
4710
4711 if (mop->size > MEM_OP_MAX_SIZE)
4712 return -E2BIG;
4713
Janosch Frank19e12272019-04-02 09:21:06 +02004714 if (kvm_s390_pv_cpu_is_protected(vcpu))
4715 return -EINVAL;
4716
Thomas Huth41408c282015-02-06 15:01:21 +01004717 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
4718 tmpbuf = vmalloc(mop->size);
4719 if (!tmpbuf)
4720 return -ENOMEM;
4721 }
4722
Thomas Huth41408c282015-02-06 15:01:21 +01004723 switch (mop->op) {
4724 case KVM_S390_MEMOP_LOGICAL_READ:
4725 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004726 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4727 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01004728 break;
4729 }
4730 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4731 if (r == 0) {
4732 if (copy_to_user(uaddr, tmpbuf, mop->size))
4733 r = -EFAULT;
4734 }
4735 break;
4736 case KVM_S390_MEMOP_LOGICAL_WRITE:
4737 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004738 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4739 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01004740 break;
4741 }
4742 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
4743 r = -EFAULT;
4744 break;
4745 }
4746 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4747 break;
Thomas Huth41408c282015-02-06 15:01:21 +01004748 }
4749
Thomas Huth41408c282015-02-06 15:01:21 +01004750 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
4751 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
4752
4753 vfree(tmpbuf);
4754 return r;
4755}
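/*
 * Illustrative sketch (vcpu_fd and buf assumed): reading 256 bytes from
 * guest logical address 0x1000 via access register 0. A check-only
 * probe would instead set KVM_S390_MEMOP_F_CHECK_ONLY in .flags, and no
 * buffer would be touched.
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x1000,
 *		.size  = 256,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buf,
 *		.ar    = 0,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */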
4756
Janosch Frank19e12272019-04-02 09:21:06 +02004757static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu,
4758 struct kvm_s390_mem_op *mop)
4759{
4760 int r, srcu_idx;
4761
4762 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4763
4764 switch (mop->op) {
4765 case KVM_S390_MEMOP_LOGICAL_READ:
4766 case KVM_S390_MEMOP_LOGICAL_WRITE:
4767 r = kvm_s390_guest_mem_op(vcpu, mop);
4768 break;
4769 case KVM_S390_MEMOP_SIDA_READ:
4770 case KVM_S390_MEMOP_SIDA_WRITE:
4771 /* we are locked against sida going away by the vcpu->mutex */
4772 r = kvm_s390_guest_sida_op(vcpu, mop);
4773 break;
4774 default:
4775 r = -EINVAL;
4776 }
4777
4778 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
4779 return r;
4780}
4781
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004782long kvm_arch_vcpu_async_ioctl(struct file *filp,
4783 unsigned int ioctl, unsigned long arg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004784{
4785 struct kvm_vcpu *vcpu = filp->private_data;
4786 void __user *argp = (void __user *)arg;
4787
Avi Kivity93736622010-05-13 12:35:17 +03004788 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01004789 case KVM_S390_IRQ: {
4790 struct kvm_s390_irq s390irq;
4791
Jens Freimann47b43c52014-11-11 20:57:06 +01004792 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004793 return -EFAULT;
4794 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Jens Freimann47b43c52014-11-11 20:57:06 +01004795 }
Avi Kivity93736622010-05-13 12:35:17 +03004796 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01004797 struct kvm_s390_interrupt s390int;
Thomas Huth53936b52019-09-12 13:54:38 +02004798 struct kvm_s390_irq s390irq = {};
Carsten Otteba5c1e92008-03-25 18:47:26 +01004799
4800 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004801 return -EFAULT;
Jens Freimann383d0b02014-07-29 15:11:49 +02004802 if (s390int_to_s390irq(&s390int, &s390irq))
4803 return -EINVAL;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004804 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Carsten Otteba5c1e92008-03-25 18:47:26 +01004805 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004806 }
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004807 return -ENOIOCTLCMD;
4808}
4809
4810long kvm_arch_vcpu_ioctl(struct file *filp,
4811 unsigned int ioctl, unsigned long arg)
4812{
4813 struct kvm_vcpu *vcpu = filp->private_data;
4814 void __user *argp = (void __user *)arg;
4815 int idx;
4816 long r;
Janosch Frank8a8378f2020-01-09 04:37:50 -05004817 u16 rc, rrc;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004818
4819 vcpu_load(vcpu);
4820
4821 switch (ioctl) {
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004822 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02004823 idx = srcu_read_lock(&vcpu->kvm->srcu);
Christian Borntraeger55680892020-01-31 05:02:00 -05004824 r = kvm_s390_store_status_unloaded(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02004825 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004826 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004827 case KVM_S390_SET_INITIAL_PSW: {
4828 psw_t psw;
4829
Avi Kivitybc923cc2010-05-13 12:21:46 +03004830 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004831 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03004832 break;
4833 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
4834 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004835 }
Janosch Frank7de3f142020-01-31 05:02:02 -05004836 case KVM_S390_CLEAR_RESET:
4837 r = 0;
4838 kvm_arch_vcpu_ioctl_clear_reset(vcpu);
Janosch Frank8a8378f2020-01-09 04:37:50 -05004839 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4840 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
4841 UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
4842 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
4843 rc, rrc);
4844 }
Janosch Frank7de3f142020-01-31 05:02:02 -05004845 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004846 case KVM_S390_INITIAL_RESET:
Janosch Frank7de3f142020-01-31 05:02:02 -05004847 r = 0;
4848 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
Janosch Frank8a8378f2020-01-09 04:37:50 -05004849 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4850 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
4851 UVC_CMD_CPU_RESET_INITIAL,
4852 &rc, &rrc);
4853 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
4854 rc, rrc);
4855 }
Janosch Frank7de3f142020-01-31 05:02:02 -05004856 break;
4857 case KVM_S390_NORMAL_RESET:
4858 r = 0;
4859 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
Janosch Frank8a8378f2020-01-09 04:37:50 -05004860 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4861 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
4862 UVC_CMD_CPU_RESET, &rc, &rrc);
4863 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
4864 rc, rrc);
4865 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03004866 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02004867 case KVM_SET_ONE_REG:
4868 case KVM_GET_ONE_REG: {
4869 struct kvm_one_reg reg;
Janosch Frank68cf7b12019-06-14 13:11:21 +02004870 r = -EINVAL;
4871 if (kvm_s390_pv_cpu_is_protected(vcpu))
4872 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02004873 r = -EFAULT;
4874 if (copy_from_user(&reg, argp, sizeof(reg)))
4875 break;
4876 if (ioctl == KVM_SET_ONE_REG)
4877 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
4878 else
4879 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
4880 break;
4881 }
Carsten Otte27e03932012-01-04 10:25:21 +01004882#ifdef CONFIG_KVM_S390_UCONTROL
4883 case KVM_S390_UCAS_MAP: {
4884 struct kvm_s390_ucas_mapping ucasmap;
4885
4886 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4887 r = -EFAULT;
4888 break;
4889 }
4890
4891 if (!kvm_is_ucontrol(vcpu->kvm)) {
4892 r = -EINVAL;
4893 break;
4894 }
4895
4896 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
4897 ucasmap.vcpu_addr, ucasmap.length);
4898 break;
4899 }
4900 case KVM_S390_UCAS_UNMAP: {
4901 struct kvm_s390_ucas_mapping ucasmap;
4902
4903 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4904 r = -EFAULT;
4905 break;
4906 }
4907
4908 if (!kvm_is_ucontrol(vcpu->kvm)) {
4909 r = -EINVAL;
4910 break;
4911 }
4912
4913 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
4914 ucasmap.length);
4915 break;
4916 }
4917#endif
Carsten Otteccc79102012-01-04 10:25:26 +01004918 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02004919 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01004920 break;
4921 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01004922 case KVM_ENABLE_CAP:
4923 {
4924 struct kvm_enable_cap cap;
4925 r = -EFAULT;
4926 if (copy_from_user(&cap, argp, sizeof(cap)))
4927 break;
4928 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
4929 break;
4930 }
Thomas Huth41408c282015-02-06 15:01:21 +01004931 case KVM_S390_MEM_OP: {
4932 struct kvm_s390_mem_op mem_op;
4933
4934 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
Janosch Frank19e12272019-04-02 09:21:06 +02004935 r = kvm_s390_guest_memsida_op(vcpu, &mem_op);
Thomas Huth41408c282015-02-06 15:01:21 +01004936 else
4937 r = -EFAULT;
4938 break;
4939 }
Jens Freimann816c7662014-11-24 17:13:46 +01004940 case KVM_S390_SET_IRQ_STATE: {
4941 struct kvm_s390_irq_state irq_state;
4942
4943 r = -EFAULT;
4944 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4945 break;
4946 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
4947 irq_state.len == 0 ||
4948 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
4949 r = -EINVAL;
4950 break;
4951 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004952 /* do not use irq_state.flags, as it would break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004953 r = kvm_s390_set_irq_state(vcpu,
4954 (void __user *) irq_state.buf,
4955 irq_state.len);
4956 break;
4957 }
4958 case KVM_S390_GET_IRQ_STATE: {
4959 struct kvm_s390_irq_state irq_state;
4960
4961 r = -EFAULT;
4962 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4963 break;
4964 if (irq_state.len == 0) {
4965 r = -EINVAL;
4966 break;
4967 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004968 /* do not use irq_state.flags, as it would break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004969 r = kvm_s390_get_irq_state(vcpu,
4970 (__u8 __user *) irq_state.buf,
4971 irq_state.len);
4972 break;
4973 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004974 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01004975 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004976 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004977
4978 vcpu_put(vcpu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004979 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004980}
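
/*
 * Illustrative sketch: setting the initial PSW via the ioctl handled
 * above. The mask and address values are assumptions of the example;
 * 0x0000000180000000 sets the EA/BA bits for 64-bit addressing mode.
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000UL,
 *		.addr = 0x10000,	// guest entry point (example value)
 *	};
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */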
4981
Souptick Joarder1499fa82018-04-19 00:49:58 +05304982vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
Carsten Otte5b1c1492012-01-04 10:25:23 +01004983{
4984#ifdef CONFIG_KVM_S390_UCONTROL
4985 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
4986 && (kvm_is_ucontrol(vcpu->kvm))) {
4987 vmf->page = virt_to_page(vcpu->arch.sie_block);
4988 get_page(vmf->page);
4989 return 0;
4990 }
4991#endif
4992 return VM_FAULT_SIGBUS;
4993}
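
/*
 * Illustrative sketch (ucontrol VMs only, matching the check above):
 * mapping the vcpu's SIE control block into userspace. A 4 KB host page
 * size is assumed for the example.
 *
 *	void *sie = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * 4096);
 */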
4994
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004995/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004996int kvm_arch_prepare_memory_region(struct kvm *kvm,
4997 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004998 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09004999 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005000{
Nick Wangdd2887e2013-03-25 17:22:57 +01005001 /* A few sanity checks. Memory slots must start and end on a segment
 5002 boundary (1 MB). The memory backing a slot in userland may be
 5003 fragmented across multiple VMAs, and it is fine to mmap() and
 5004 munmap() within this slot at any time after this call. */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005005
Carsten Otte598841c2011-07-24 10:48:21 +02005006 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005007 return -EINVAL;
5008
Carsten Otte598841c2011-07-24 10:48:21 +02005009 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005010 return -EINVAL;
5011
Dominik Dingela3a92c32014-12-01 17:24:42 +01005012 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
5013 return -EINVAL;
5014
Janosch Frank29b40f12019-09-30 04:19:18 -04005015 /* Once the VM is protected, its memory slots must no longer change */
5016 if (kvm_s390_pv_get_handle(kvm))
5017 return -EINVAL;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02005018 return 0;
5019}
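
/*
 * Illustrative sketch: a memory slot that passes the checks above; both
 * userspace_addr and memory_size are segment (1 MB) aligned. vm_fd and
 * backing are assumptions of the example.
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot		 = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size	 = 0x100000,	// 1 MB
 *		.userspace_addr	 = (__u64)(unsigned long)backing,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */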
5020
5021void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02005022 const struct kvm_userspace_memory_region *mem,
Sean Christopherson9d4c1972020-02-18 13:07:24 -08005023 struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02005024 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09005025 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02005026{
Christian Borntraeger19ec1662019-05-24 16:06:23 +02005027 int rc = 0;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02005028
Christian Borntraeger19ec1662019-05-24 16:06:23 +02005029 switch (change) {
5030 case KVM_MR_DELETE:
5031 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5032 old->npages * PAGE_SIZE);
5033 break;
5034 case KVM_MR_MOVE:
5035 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
5036 old->npages * PAGE_SIZE);
5037 if (rc)
5038 break;
Joe Perches3b684a42020-03-10 21:51:32 -07005039 fallthrough;
Christian Borntraeger19ec1662019-05-24 16:06:23 +02005040 case KVM_MR_CREATE:
5041 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
5042 mem->guest_phys_addr, mem->memory_size);
5043 break;
5044 case KVM_MR_FLAGS_ONLY:
5045 break;
5046 default:
5047 WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
5048 }
Carsten Otte598841c2011-07-24 10:48:21 +02005049 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02005050 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02005051 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005052}
5053
Alexander Yarygin60a37702016-04-01 15:38:57 +03005054static inline unsigned long nonhyp_mask(int i)
5055{
5056 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
5057
5058 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
5059}
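
/*
 * Worked example for nonhyp_mask(), with an illustrative hmfai value:
 * for sclp.hmfai = 0x40000000 and i = 0, (hmfai << 0) >> 30 = 1, so the
 * function returns 0x0000ffffffffffffUL >> 16 = 0x00000000ffffffff.
 * Each increment of the 2-bit hmfai field for a facility word thus hides
 * a further block of 16 facility bits from the non-hypervisor mask.
 */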
5060
Christian Borntraeger3491caf2016-05-13 12:16:35 +02005061void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
5062{
5063 vcpu->valid_wakeup = false;
5064}
5065
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005066static int __init kvm_s390_init(void)
5067{
Alexander Yarygin60a37702016-04-01 15:38:57 +03005068 int i;
5069
David Hildenbrand07197fd2015-01-30 16:01:38 +01005070 if (!sclp.has_sief2) {
Michael Mueller8d43d572018-12-10 11:15:16 +01005071 pr_info("SIE is not available\n");
David Hildenbrand07197fd2015-01-30 16:01:38 +01005072 return -ENODEV;
5073 }
5074
Janosch Franka4499382018-07-13 11:28:31 +01005075 if (nested && hpage) {
Michael Mueller8d43d572018-12-10 11:15:16 +01005076 pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
Janosch Franka4499382018-07-13 11:28:31 +01005077 return -EINVAL;
5078 }
5079
Alexander Yarygin60a37702016-04-01 15:38:57 +03005080 for (i = 0; i < 16; i++)
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00005081 kvm_s390_fac_base[i] |=
Sven Schnelle17e89e12021-05-05 22:01:10 +02005082 stfle_fac_list[i] & nonhyp_mask(i);
Alexander Yarygin60a37702016-04-01 15:38:57 +03005083
Michael Mueller9d8d5782015-02-02 15:42:51 +01005084 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005085}
5086
5087static void __exit kvm_s390_exit(void)
5088{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005089 kvm_exit();
5090}
5091
5092module_init(kvm_s390_init);
5093module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02005094
5095/*
5096 * Enable autoloading of the kvm module.
5097 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5098 * since x86 takes a different approach.
5099 */
5100#include <linux/miscdevice.h>
5101MODULE_ALIAS_MISCDEV(KVM_MINOR);
5102MODULE_ALIAS("devname:kvm");