// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
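/*
 * Worst-case buffer size for the KVM_S390_{GET,SET}_IRQ_STATE ioctls:
 * a vcpu can have one emergency signal pending per possible sending
 * vcpu plus a bounded number (LOCAL_IRQS) of other local interrupts.
 */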
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio)
};

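/*
 * Layout of the binary stats file descriptor contents: the
 * kvm_stats_header comes first, followed by the id string, the
 * descriptor array and finally the data values; the offsets below
 * encode exactly that ordering.
 */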
const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling. >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

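/*
 * Apply a host TOD clock delta to one SIE control block.  The guest
 * epoch is a 72-bit quantity: epdx holds the 8-bit epoch index
 * (multiple-epoch facility, ECD_MEF) on top of the 64-bit epoch.
 * The 64-bit addition below can wrap, so the sign extension and the
 * carry have to be propagated into epdx by hand.
 */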
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void *opaque)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

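/*
 * PERFORM LOCKED OPERATION probe: setting bit 0x100 in the function
 * code turns PLO into a "test bit" query that ignores the parameter
 * registers; condition code 0 means the function code is installed.
 */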
static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}

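/*
 * Run the query function (function code 0 in r0) of a 32-bit opcode
 * such as SORTL or DFLTCC below; r1 points to the buffer receiving
 * the installed-functions bit mask.
 */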
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto out;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
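	/*
	 * vcpu limits are dictated by the SCA type: the basic SCA has
	 * 64 entries, the extended SCA 248; if no SCA entries are used
	 * at all, KVM's generic maximum applies.
	 */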
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	default:
		r = 0;
	}
	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

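/*
 * Request an intercept of operation exceptions (invalid instruction
 * code 0x0000) on every vcpu so they can be forwarded to user space,
 * as enabled via KVM_CAP_S390_USER_INSTR0.
 */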
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

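/*
 * Compute the guest view of the TOD clock: host TOD plus the per-VM
 * epoch; with the multiple-epoch facility a carry out of the 64-bit
 * addition is propagated into the returned epoch index.
 */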
David Hildenbrand33d1b272018-04-27 14:36:13 +02001189static void kvm_s390_get_tod_clock(struct kvm *kvm,
1190 struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001191{
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001192 union tod_clock clk;
Collin L. Walling8fa16962016-07-26 15:29:44 -04001193
1194 preempt_disable();
1195
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001196 store_tod_clock_ext(&clk);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001197
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001198 gtod->tod = clk.tod + kvm->arch.epoch;
David Hildenbrand33d1b272018-04-27 14:36:13 +02001199 gtod->epoch_idx = 0;
1200 if (test_kvm_facility(kvm, 139)) {
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001201 gtod->epoch_idx = clk.ei + kvm->arch.epdx;
1202 if (gtod->tod < clk.tod)
David Hildenbrand33d1b272018-04-27 14:36:13 +02001203 gtod->epoch_idx += 1;
1204 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04001205
1206 preempt_enable();
1207}
1208
1209static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1210{
1211 struct kvm_s390_vm_tod_clock gtod;
1212
1213 memset(&gtod, 0, sizeof(gtod));
David Hildenbrand33d1b272018-04-27 14:36:13 +02001214 kvm_s390_get_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001215 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1216 return -EFAULT;
1217
1218 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1219 gtod.epoch_idx, gtod.tod);
1220 return 0;
1221}
1222
Jason J. Herne72f25022014-11-25 09:46:02 -05001223static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1224{
1225 u8 gtod_high = 0;
1226
1227 if (copy_to_user((void __user *)attr->addr, &gtod_high,
1228 sizeof(gtod_high)))
1229 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001230 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001231
1232 return 0;
1233}
1234
1235static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1236{
David Hildenbrand5a3d8832015-09-29 16:27:24 +02001237 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -05001238
David Hildenbrand60417fc2015-09-29 16:20:36 +02001239 gtod = kvm_s390_get_tod_clock_fast(kvm);
Jason J. Herne72f25022014-11-25 09:46:02 -05001240 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1241 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001242 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001243
1244 return 0;
1245}
1246
1247static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1248{
1249 int ret;
1250
1251 if (attr->flags)
1252 return -EINVAL;
1253
1254 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001255 case KVM_S390_VM_TOD_EXT:
1256 ret = kvm_s390_get_tod_ext(kvm, attr);
1257 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001258 case KVM_S390_VM_TOD_HIGH:
1259 ret = kvm_s390_get_tod_high(kvm, attr);
1260 break;
1261 case KVM_S390_VM_TOD_LOW:
1262 ret = kvm_s390_get_tod_low(kvm, attr);
1263 break;
1264 default:
1265 ret = -ENXIO;
1266 break;
1267 }
1268 return ret;
1269}
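
/*
 * Illustrative sketch (not part of the original file): reading the guest TOD
 * through the attribute interface above. KVM_S390_VM_TOD_EXT returns epoch
 * index and TOD base in one call; without the multiple-epoch facility (139)
 * the kernel reports epoch_idx as 0. Assumes the <linux/kvm.h> UAPI.
 */
#if 0	/* example only */
static int read_guest_tod(int vm_fd, struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_TOD,
		.attr  = KVM_S390_VM_TOD_EXT,
		.addr  = (__u64)(unsigned long)gtod,
	};

	return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
}
#endif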
1270
Michael Mueller658b6ed2015-02-02 15:49:35 +01001271static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1272{
1273 struct kvm_s390_vm_cpu_processor *proc;
David Hildenbrand053dd232016-04-04 13:59:42 +02001274 u16 lowest_ibc, unblocked_ibc;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001275 int ret = 0;
1276
1277 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +02001278 if (kvm->created_vcpus) {
Michael Mueller658b6ed2015-02-02 15:49:35 +01001279 ret = -EBUSY;
1280 goto out;
1281 }
Christian Borntraegerc4196212020-11-06 08:34:23 +01001282 proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001283 if (!proc) {
1284 ret = -ENOMEM;
1285 goto out;
1286 }
1287 if (!copy_from_user(proc, (void __user *)attr->addr,
1288 sizeof(*proc))) {
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001289 kvm->arch.model.cpuid = proc->cpuid;
David Hildenbrand053dd232016-04-04 13:59:42 +02001290 lowest_ibc = sclp.ibc >> 16 & 0xfff;
1291 unblocked_ibc = sclp.ibc & 0xfff;
David Hildenbrand0487c442016-06-10 09:22:31 +02001292 if (lowest_ibc && proc->ibc) {
David Hildenbrand053dd232016-04-04 13:59:42 +02001293 if (proc->ibc > unblocked_ibc)
1294 kvm->arch.model.ibc = unblocked_ibc;
1295 else if (proc->ibc < lowest_ibc)
1296 kvm->arch.model.ibc = lowest_ibc;
1297 else
1298 kvm->arch.model.ibc = proc->ibc;
1299 }
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001300 memcpy(kvm->arch.model.fac_list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +01001301 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001302 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1303 kvm->arch.model.ibc,
1304 kvm->arch.model.cpuid);
1305 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1306 kvm->arch.model.fac_list[0],
1307 kvm->arch.model.fac_list[1],
1308 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001309 } else
1310 ret = -EFAULT;
1311 kfree(proc);
1312out:
1313 mutex_unlock(&kvm->lock);
1314 return ret;
1315}
1316
David Hildenbrand15c97052015-03-19 17:36:43 +01001317static int kvm_s390_set_processor_feat(struct kvm *kvm,
1318 struct kvm_device_attr *attr)
1319{
1320 struct kvm_s390_vm_cpu_feat data;
David Hildenbrand15c97052015-03-19 17:36:43 +01001321
1322 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1323 return -EFAULT;
1324 if (!bitmap_subset((unsigned long *) data.feat,
1325 kvm_s390_available_cpu_feat,
1326 KVM_S390_VM_CPU_FEAT_NR_BITS))
1327 return -EINVAL;
1328
1329 mutex_lock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001330 if (kvm->created_vcpus) {
1331 mutex_unlock(&kvm->lock);
1332 return -EBUSY;
David Hildenbrand15c97052015-03-19 17:36:43 +01001333 }
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001334 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1335 KVM_S390_VM_CPU_FEAT_NR_BITS);
David Hildenbrand15c97052015-03-19 17:36:43 +01001336 mutex_unlock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001337 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1338 data.feat[0],
1339 data.feat[1],
1340 data.feat[2]);
1341 return 0;
David Hildenbrand15c97052015-03-19 17:36:43 +01001342}
1343
David Hildenbrand0a763c72016-05-18 16:03:47 +02001344static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1345 struct kvm_device_attr *attr)
1346{
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001347 mutex_lock(&kvm->lock);
1348 if (kvm->created_vcpus) {
1349 mutex_unlock(&kvm->lock);
1350 return -EBUSY;
1351 }
1352
1353 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1354 sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1355 mutex_unlock(&kvm->lock);
1356 return -EFAULT;
1357 }
1358 mutex_unlock(&kvm->lock);
1359
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001360 VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1361 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1362 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1363 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1364 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1365 VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1366 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1367 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1368 VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1369 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1370 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1371 VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1372 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1373 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1374 VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
1375 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1376 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1377 VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1378 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1379 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1380 VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1381 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1382 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1383 VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1384 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1385 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1386 VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1387 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1388 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1389 VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1390 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1391 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1392 VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1393 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1394 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1395 VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1396 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1397 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1398 VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1399 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1400 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1401 VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1402 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1403 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001404 VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1405 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1406 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001407 VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1408 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1409 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1410 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1411 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001412 VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1413 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1414 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1415 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1416 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001417
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001418 return 0;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001419}
1420
Michael Mueller658b6ed2015-02-02 15:49:35 +01001421static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1422{
1423 int ret = -ENXIO;
1424
1425 switch (attr->attr) {
1426 case KVM_S390_VM_CPU_PROCESSOR:
1427 ret = kvm_s390_set_processor(kvm, attr);
1428 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001429 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1430 ret = kvm_s390_set_processor_feat(kvm, attr);
1431 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001432 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1433 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1434 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001435 }
1436 return ret;
1437}
1438
1439static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1440{
1441 struct kvm_s390_vm_cpu_processor *proc;
1442 int ret = 0;
1443
Christian Borntraegerc4196212020-11-06 08:34:23 +01001444 proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001445 if (!proc) {
1446 ret = -ENOMEM;
1447 goto out;
1448 }
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001449 proc->cpuid = kvm->arch.model.cpuid;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001450 proc->ibc = kvm->arch.model.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001451 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1452 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001453 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1454 kvm->arch.model.ibc,
1455 kvm->arch.model.cpuid);
1456 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1457 kvm->arch.model.fac_list[0],
1458 kvm->arch.model.fac_list[1],
1459 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001460 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1461 ret = -EFAULT;
1462 kfree(proc);
1463out:
1464 return ret;
1465}
1466
1467static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1468{
1469 struct kvm_s390_vm_cpu_machine *mach;
1470 int ret = 0;
1471
Christian Borntraegerc4196212020-11-06 08:34:23 +01001472 mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001473 if (!mach) {
1474 ret = -ENOMEM;
1475 goto out;
1476 }
1477 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001478 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001479 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001480 S390_ARCH_FAC_LIST_SIZE_BYTE);
Sven Schnelle17e89e12021-05-05 22:01:10 +02001481 memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
1482 sizeof(stfle_fac_list));
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001483 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1484 kvm->arch.model.ibc,
1485 kvm->arch.model.cpuid);
1486 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1487 mach->fac_mask[0],
1488 mach->fac_mask[1],
1489 mach->fac_mask[2]);
1490 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1491 mach->fac_list[0],
1492 mach->fac_list[1],
1493 mach->fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001494 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1495 ret = -EFAULT;
1496 kfree(mach);
1497out:
1498 return ret;
1499}
1500
David Hildenbrand15c97052015-03-19 17:36:43 +01001501static int kvm_s390_get_processor_feat(struct kvm *kvm,
1502 struct kvm_device_attr *attr)
1503{
1504 struct kvm_s390_vm_cpu_feat data;
1505
1506 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1507 KVM_S390_VM_CPU_FEAT_NR_BITS);
1508 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1509 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001510 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1511 data.feat[0],
1512 data.feat[1],
1513 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001514 return 0;
1515}
1516
1517static int kvm_s390_get_machine_feat(struct kvm *kvm,
1518 struct kvm_device_attr *attr)
1519{
1520 struct kvm_s390_vm_cpu_feat data;
1521
1522 bitmap_copy((unsigned long *) data.feat,
1523 kvm_s390_available_cpu_feat,
1524 KVM_S390_VM_CPU_FEAT_NR_BITS);
1525 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1526 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001527 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1528 data.feat[0],
1529 data.feat[1],
1530 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001531 return 0;
1532}
1533
David Hildenbrand0a763c72016-05-18 16:03:47 +02001534static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1535 struct kvm_device_attr *attr)
1536{
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001537 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1538 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1539 return -EFAULT;
1540
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001541 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1542 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1543 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1544 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1545 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1546 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1547 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1548 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1549 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1550 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1551 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1552 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1553 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1554 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1555 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
1556 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1557 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1558 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1559 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1560 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1561 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1562 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1563 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1564 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1565 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1566 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1567 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1568 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1569 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1570 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1571 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1572 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1573 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1574 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1575 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1576 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1577 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1578 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1579 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1580 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1581 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1582 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1583 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1584 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001585 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1586 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1587 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001588 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1589 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1590 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1591 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1592 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001593 VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1594 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1595 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1596 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1597 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001598
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001599 return 0;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001600}
1601
1602static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1603 struct kvm_device_attr *attr)
1604{
1605 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1606 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1607 return -EFAULT;
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001608
1609 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1610 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1611 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1612 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1613 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1614 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
1615 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1616 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1617 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
1618 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1619 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1620 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
1621 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1622 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1623 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
1624 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1625 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1626 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
1627 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1628 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1629 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
1630 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1631 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1632 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
1633 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1634 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1635 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
1636 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1637 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1638 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
1639 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1640 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1641 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
1642 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1643 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1644 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
1645 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1646 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1647 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
1648 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1649 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1650 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
1651 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1652 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001653 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
1654 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1655 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001656 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1657 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1658 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1659 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1660 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001661 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1662 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1663 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1664 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1665 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001666
David Hildenbrand0a763c72016-05-18 16:03:47 +02001667 return 0;
1668}
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001669
Michael Mueller658b6ed2015-02-02 15:49:35 +01001670static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1671{
1672 int ret = -ENXIO;
1673
1674 switch (attr->attr) {
1675 case KVM_S390_VM_CPU_PROCESSOR:
1676 ret = kvm_s390_get_processor(kvm, attr);
1677 break;
1678 case KVM_S390_VM_CPU_MACHINE:
1679 ret = kvm_s390_get_machine(kvm, attr);
1680 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001681 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1682 ret = kvm_s390_get_processor_feat(kvm, attr);
1683 break;
1684 case KVM_S390_VM_CPU_MACHINE_FEAT:
1685 ret = kvm_s390_get_machine_feat(kvm, attr);
1686 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001687 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1688 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1689 break;
1690 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1691 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1692 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001693 }
1694 return ret;
1695}
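
/*
 * Illustrative sketch (not part of the original file): the usual CPU-model
 * handshake. Userspace fetches KVM_S390_VM_CPU_MACHINE, decides what the
 * guest may see, and stores the result as KVM_S390_VM_CPU_PROCESSOR --
 * which, per kvm_s390_set_processor() above, must happen before the first
 * vcpu is created. Assumes the <linux/kvm.h> UAPI structs; the masking
 * policy itself is up to the VMM.
 */
#if 0	/* example only */
static int set_cpu_model_from_host(int vm_fd)
{
	struct kvm_s390_vm_cpu_machine mach;
	struct kvm_s390_vm_cpu_processor proc = {};
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CPU_MODEL,
		.attr  = KVM_S390_VM_CPU_MACHINE,
		.addr  = (__u64)(unsigned long)&mach,
	};

	if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr))
		return -1;

	proc.cpuid = mach.cpuid;
	proc.ibc = mach.ibc & 0xfff;	/* unblocked IBC reported by the host */
	memcpy(proc.fac_list, mach.fac_list, sizeof(proc.fac_list));
	/* a real VMM would clear facility bits it does not want to expose */

	attr.attr = KVM_S390_VM_CPU_PROCESSOR;
	attr.addr = (__u64)(unsigned long)&proc;
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
#endif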
1696
Dominik Dingelf2061652014-04-09 13:13:00 +02001697static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1698{
1699 int ret;
1700
1701 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001702 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001703 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001704 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001705 case KVM_S390_VM_TOD:
1706 ret = kvm_s390_set_tod(kvm, attr);
1707 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001708 case KVM_S390_VM_CPU_MODEL:
1709 ret = kvm_s390_set_cpu_model(kvm, attr);
1710 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001711 case KVM_S390_VM_CRYPTO:
1712 ret = kvm_s390_vm_set_crypto(kvm, attr);
1713 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001714 case KVM_S390_VM_MIGRATION:
1715 ret = kvm_s390_vm_set_migration(kvm, attr);
1716 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001717 default:
1718 ret = -ENXIO;
1719 break;
1720 }
1721
1722 return ret;
1723}
1724
1725static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1726{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001727 int ret;
1728
1729 switch (attr->group) {
1730 case KVM_S390_VM_MEM_CTRL:
1731 ret = kvm_s390_get_mem_control(kvm, attr);
1732 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001733 case KVM_S390_VM_TOD:
1734 ret = kvm_s390_get_tod(kvm, attr);
1735 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001736 case KVM_S390_VM_CPU_MODEL:
1737 ret = kvm_s390_get_cpu_model(kvm, attr);
1738 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001739 case KVM_S390_VM_MIGRATION:
1740 ret = kvm_s390_vm_get_migration(kvm, attr);
1741 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001742 default:
1743 ret = -ENXIO;
1744 break;
1745 }
1746
1747 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +02001748}
1749
1750static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1751{
1752 int ret;
1753
1754 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001755 case KVM_S390_VM_MEM_CTRL:
1756 switch (attr->attr) {
1757 case KVM_S390_VM_MEM_ENABLE_CMMA:
1758 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +01001759 ret = sclp.has_cmma ? 0 : -ENXIO;
1760 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001761 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001762 ret = 0;
1763 break;
1764 default:
1765 ret = -ENXIO;
1766 break;
1767 }
1768 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001769 case KVM_S390_VM_TOD:
1770 switch (attr->attr) {
1771 case KVM_S390_VM_TOD_LOW:
1772 case KVM_S390_VM_TOD_HIGH:
1773 ret = 0;
1774 break;
1775 default:
1776 ret = -ENXIO;
1777 break;
1778 }
1779 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001780 case KVM_S390_VM_CPU_MODEL:
1781 switch (attr->attr) {
1782 case KVM_S390_VM_CPU_PROCESSOR:
1783 case KVM_S390_VM_CPU_MACHINE:
David Hildenbrand15c97052015-03-19 17:36:43 +01001784 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1785 case KVM_S390_VM_CPU_MACHINE_FEAT:
David Hildenbrand0a763c72016-05-18 16:03:47 +02001786 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001787 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001788 ret = 0;
1789 break;
1790 default:
1791 ret = -ENXIO;
1792 break;
1793 }
1794 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001795 case KVM_S390_VM_CRYPTO:
1796 switch (attr->attr) {
1797 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1798 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1799 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1800 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1801 ret = 0;
1802 break;
Tony Krowiak37940fb2018-09-25 19:16:39 -04001803 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1804 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1805 ret = ap_instructions_available() ? 0 : -ENXIO;
1806 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001807 default:
1808 ret = -ENXIO;
1809 break;
1810 }
1811 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001812 case KVM_S390_VM_MIGRATION:
1813 ret = 0;
1814 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001815 default:
1816 ret = -ENXIO;
1817 break;
1818 }
1819
1820 return ret;
1821}
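
/*
 * Illustrative sketch (not part of the original file): feature-testing with
 * KVM_HAS_DEVICE_ATTR, which lands in kvm_s390_vm_has_attr() above and has
 * no side effects. Assumes the <linux/kvm.h> UAPI.
 */
#if 0	/* example only */
static int vm_has_attr(int vm_fd, __u32 group, __u64 attr_nr)
{
	struct kvm_device_attr attr = { .group = group, .attr = attr_nr };

	return ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
}

/* e.g.: vm_has_attr(vm_fd, KVM_S390_VM_CRYPTO, KVM_S390_VM_CRYPTO_ENABLE_APIE) */
#endif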
1822
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001823static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1824{
1825 uint8_t *keys;
1826 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001827 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001828
1829 if (args->flags != 0)
1830 return -EINVAL;
1831
1832 /* Is this guest using storage keys? */
Janosch Frank55531b72018-02-15 16:33:47 +01001833 if (!mm_uses_skeys(current->mm))
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001834 return KVM_S390_GET_SKEYS_NONE;
1835
1836 /* Enforce sane limit on memory allocation */
1837 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1838 return -EINVAL;
1839
Christian Borntraegerc4196212020-11-06 08:34:23 +01001840 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001841 if (!keys)
1842 return -ENOMEM;
1843
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001844 mmap_read_lock(current->mm);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001845 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001846 for (i = 0; i < args->count; i++) {
1847 hva = gfn_to_hva(kvm, args->start_gfn + i);
1848 if (kvm_is_error_hva(hva)) {
1849 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001850 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001851 }
1852
David Hildenbrand154c8c12016-05-09 11:22:34 +02001853 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1854 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001855 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001856 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001857 srcu_read_unlock(&kvm->srcu, srcu_idx);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001858 mmap_read_unlock(current->mm);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001859
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001860 if (!r) {
1861 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1862 sizeof(uint8_t) * args->count);
1863 if (r)
1864 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001865 }
1866
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001867 kvfree(keys);
1868 return r;
1869}
1870
1871static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1872{
1873 uint8_t *keys;
1874 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001875 int srcu_idx, i, r = 0;
Janosch Frankbd096f62018-07-18 13:40:22 +01001876 bool unlocked;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001877
1878 if (args->flags != 0)
1879 return -EINVAL;
1880
1881 /* Enforce sane limit on memory allocation */
1882 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1883 return -EINVAL;
1884
Christian Borntraegerc4196212020-11-06 08:34:23 +01001885 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001886 if (!keys)
1887 return -ENOMEM;
1888
1889 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1890 sizeof(uint8_t) * args->count);
1891 if (r) {
1892 r = -EFAULT;
1893 goto out;
1894 }
1895
1896 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001897 r = s390_enable_skey();
1898 if (r)
1899 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001900
Janosch Frankbd096f62018-07-18 13:40:22 +01001901 i = 0;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001902 mmap_read_lock(current->mm);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001903 srcu_idx = srcu_read_lock(&kvm->srcu);
Janosch Frankbd096f62018-07-18 13:40:22 +01001904 while (i < args->count) {
1905 unlocked = false;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001906 hva = gfn_to_hva(kvm, args->start_gfn + i);
1907 if (kvm_is_error_hva(hva)) {
1908 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001909 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001910 }
1911
1912 /* Lowest order bit is reserved */
1913 if (keys[i] & 0x01) {
1914 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001915 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001916 }
1917
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001918 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Janosch Frankbd096f62018-07-18 13:40:22 +01001919 if (r) {
Peter Xu64019a22020-08-11 18:39:01 -07001920 r = fixup_user_fault(current->mm, hva,
Janosch Frankbd096f62018-07-18 13:40:22 +01001921 FAULT_FLAG_WRITE, &unlocked);
1922 if (r)
1923 break;
1924 }
1925 if (!r)
1926 i++;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001927 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001928 srcu_read_unlock(&kvm->srcu, srcu_idx);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001929 mmap_read_unlock(current->mm);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001930out:
1931 kvfree(keys);
1932 return r;
1933}
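
/*
 * Illustrative sketch (not part of the original file): saving storage keys
 * with the ioctl that wraps kvm_s390_get_skeys() above. A return value of
 * KVM_S390_GET_SKEYS_NONE means the guest never enabled keys and nothing
 * needs to be transferred; chunking to KVM_S390_SKEYS_MAX is left out.
 */
#if 0	/* example only */
static long save_skeys(int vm_fd, __u64 start_gfn, __u64 count, __u8 *buf)
{
	struct kvm_s390_skeys args = {
		.start_gfn = start_gfn,
		.count = count,
		.skeydata_addr = (__u64)(unsigned long)buf,
	};

	return ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
}
#endif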
1934
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001935/*
                                     1936 * Base address and length must be sent at the start of each block, so it
                                     1937 * is cheaper to keep sending clean data than to start a new block, as long
                                     1938 * as the clean run is shorter than those two longs of header.
1939 */
1940#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1941/* for consistency */
1942#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1943
1944/*
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001945 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
1946 * address falls in a hole. In that case the index of one of the memslots
1947 * bordering the hole is returned.
1948 */
1949static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
1950{
1951 int start = 0, end = slots->used_slots;
David Matlack87689272021-08-04 22:28:38 +00001952 int slot = atomic_read(&slots->last_used_slot);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001953 struct kvm_memory_slot *memslots = slots->memslots;
1954
1955 if (gfn >= memslots[slot].base_gfn &&
1956 gfn < memslots[slot].base_gfn + memslots[slot].npages)
1957 return slot;
1958
1959 while (start < end) {
1960 slot = start + (end - start) / 2;
1961
1962 if (gfn >= memslots[slot].base_gfn)
1963 end = slot;
1964 else
1965 start = slot + 1;
1966 }
1967
Sean Christopherson97daa022020-04-07 23:40:59 -07001968 if (start >= slots->used_slots)
1969 return slots->used_slots - 1;
1970
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001971 if (gfn >= memslots[start].base_gfn &&
1972 gfn < memslots[start].base_gfn + memslots[start].npages) {
David Matlack87689272021-08-04 22:28:38 +00001973 atomic_set(&slots->last_used_slot, start);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001974 }
1975
1976 return start;
1977}
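
/*
 * Illustrative sketch (not part of the original file): the bisection above
 * depends on the memslot array being sorted by base_gfn in descending
 * order. The same invariant, modeled on plain integers:
 */
#if 0	/* example only */
/* first index i with gfn >= base[i]; base[] sorted descending, n > 0 */
static int approx_idx(const unsigned long *base, int n, unsigned long gfn)
{
	int start = 0, end = n;

	while (start < end) {
		int mid = start + (end - start) / 2;

		if (gfn >= base[mid])
			end = mid;		/* candidate, look further left */
		else
			start = mid + 1;
	}
	return start < n ? start : n - 1;	/* clamp, as above */
}
#endif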
1978
1979static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1980 u8 *res, unsigned long bufsize)
1981{
1982 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
1983
1984 args->count = 0;
1985 while (args->count < bufsize) {
1986 hva = gfn_to_hva(kvm, cur_gfn);
1987 /*
1988 * We return an error if the first value was invalid, but we
1989 * return successfully if at least one value was copied.
1990 */
1991 if (kvm_is_error_hva(hva))
1992 return args->count ? 0 : -EFAULT;
1993 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1994 pgstev = 0;
1995 res[args->count++] = (pgstev >> 24) & 0x43;
1996 cur_gfn++;
1997 }
1998
1999 return 0;
2000}
2001
2002static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2003 unsigned long cur_gfn)
2004{
2005 int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
2006 struct kvm_memory_slot *ms = slots->memslots + slotidx;
2007 unsigned long ofs = cur_gfn - ms->base_gfn;
2008
2009 if (ms->base_gfn + ms->npages <= cur_gfn) {
2010 slotidx--;
2011 /* If we are above the highest slot, wrap around */
2012 if (slotidx < 0)
2013 slotidx = slots->used_slots - 1;
2014
2015 ms = slots->memslots + slotidx;
2016 ofs = 0;
2017 }
2018 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
2019 while ((slotidx > 0) && (ofs >= ms->npages)) {
2020 slotidx--;
2021 ms = slots->memslots + slotidx;
2022 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
2023 }
2024 return ms->base_gfn + ofs;
2025}
2026
2027static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2028 u8 *res, unsigned long bufsize)
2029{
2030 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2031 struct kvm_memslots *slots = kvm_memslots(kvm);
2032 struct kvm_memory_slot *ms;
2033
Sean Christopherson0774a962020-03-20 13:55:40 -07002034 if (unlikely(!slots->used_slots))
2035 return 0;
2036
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002037 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2038 ms = gfn_to_memslot(kvm, cur_gfn);
2039 args->count = 0;
2040 args->start_gfn = cur_gfn;
2041 if (!ms)
2042 return 0;
2043 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2044 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
2045
2046 while (args->count < bufsize) {
2047 hva = gfn_to_hva(kvm, cur_gfn);
2048 if (kvm_is_error_hva(hva))
2049 return 0;
2050 /* Decrement only if we actually flipped the bit to 0 */
2051 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2052 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2053 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2054 pgstev = 0;
2055 /* Save the value */
2056 res[args->count++] = (pgstev >> 24) & 0x43;
2057 /* If the next bit is too far away, stop. */
2058 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2059 return 0;
2060 /* If we reached the previous "next", find the next one */
2061 if (cur_gfn == next_gfn)
2062 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2063 /* Reached the end of memory or of the buffer, stop */
2064 if ((next_gfn >= mem_end) ||
2065 (next_gfn - args->start_gfn >= bufsize))
2066 return 0;
2067 cur_gfn++;
2068 /* Reached the end of the current memslot, take the next one. */
2069 if (cur_gfn - ms->base_gfn >= ms->npages) {
2070 ms = gfn_to_memslot(kvm, cur_gfn);
2071 if (!ms)
2072 return 0;
2073 }
2074 }
2075 return 0;
2076}
2077
2078/*
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002079 * This function searches for the next page with dirty CMMA attributes, and
2080 * saves the attributes in the buffer up to either the end of the buffer or
2081 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2082 * no trailing clean bytes are saved.
2083 * In case no dirty bits were found, or if CMMA was not enabled or used, the
2084 * output buffer will indicate 0 as length.
2085 */
2086static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2087 struct kvm_s390_cmma_log *args)
2088{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002089 unsigned long bufsize;
2090 int srcu_idx, peek, ret;
2091 u8 *values;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002092
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002093 if (!kvm->arch.use_cmma)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002094 return -ENXIO;
2095 /* Invalid/unsupported flags were specified */
2096 if (args->flags & ~KVM_S390_CMMA_PEEK)
2097 return -EINVAL;
                                     2098	/* Without peek, dirty bits can only be queried in migration mode */
2099 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002100 if (!peek && !kvm->arch.migration_mode)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002101 return -EINVAL;
2102 /* CMMA is disabled or was not used, or the buffer has length zero */
2103 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002104 if (!bufsize || !kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002105 memset(args, 0, sizeof(*args));
2106 return 0;
2107 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002108 /* We are not peeking, and there are no dirty pages */
2109 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2110 memset(args, 0, sizeof(*args));
2111 return 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002112 }
2113
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002114 values = vmalloc(bufsize);
2115 if (!values)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002116 return -ENOMEM;
2117
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002118 mmap_read_lock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002119 srcu_idx = srcu_read_lock(&kvm->srcu);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002120 if (peek)
2121 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2122 else
2123 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002124 srcu_read_unlock(&kvm->srcu, srcu_idx);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002125 mmap_read_unlock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002126
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002127 if (kvm->arch.migration_mode)
2128 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2129 else
2130 args->remaining = 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002131
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002132 if (copy_to_user((void __user *)args->values, values, args->count))
2133 ret = -EFAULT;
2134
2135 vfree(values);
2136 return ret;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002137}
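
/*
 * Illustrative sketch (not part of the original file): a source-side loop
 * draining dirty CMMA values via KVM_S390_GET_CMMA_BITS. Migration mode
 * must have been started through the KVM_S390_VM_MIGRATION attribute; the
 * kernel rewrites start_gfn/count/remaining on each call. Transfer of the
 * data to the target is abridged.
 */
#if 0	/* example only */
static int drain_cmma(int vm_fd, __u8 *buf, __u32 bufsize)
{
	struct kvm_s390_cmma_log log = { .start_gfn = 0 };

	do {
		log.count = bufsize;
		log.flags = 0;
		log.values = (__u64)(unsigned long)buf;
		if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log))
			return -1;
		/* ship log.start_gfn, log.count and buf[] to the target */
		log.start_gfn += log.count;
	} while (log.remaining);
	return 0;
}
#endif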
2138
2139/*
2140 * This function sets the CMMA attributes for the given pages. If the input
2141 * buffer has zero length, no action is taken, otherwise the attributes are
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002142 * set and the mm->context.uses_cmm flag is set.
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002143 */
2144static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2145 const struct kvm_s390_cmma_log *args)
2146{
2147 unsigned long hva, mask, pgstev, i;
2148 uint8_t *bits;
2149 int srcu_idx, r = 0;
2150
2151 mask = args->mask;
2152
2153 if (!kvm->arch.use_cmma)
2154 return -ENXIO;
2155 /* invalid/unsupported flags */
2156 if (args->flags != 0)
2157 return -EINVAL;
2158 /* Enforce sane limit on memory allocation */
2159 if (args->count > KVM_S390_CMMA_SIZE_MAX)
2160 return -EINVAL;
2161 /* Nothing to do */
2162 if (args->count == 0)
2163 return 0;
2164
Kees Cook42bc47b2018-06-12 14:27:11 -07002165 bits = vmalloc(array_size(sizeof(*bits), args->count));
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002166 if (!bits)
2167 return -ENOMEM;
2168
2169 r = copy_from_user(bits, (void __user *)args->values, args->count);
2170 if (r) {
2171 r = -EFAULT;
2172 goto out;
2173 }
2174
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002175 mmap_read_lock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002176 srcu_idx = srcu_read_lock(&kvm->srcu);
2177 for (i = 0; i < args->count; i++) {
2178 hva = gfn_to_hva(kvm, args->start_gfn + i);
2179 if (kvm_is_error_hva(hva)) {
2180 r = -EFAULT;
2181 break;
2182 }
2183
2184 pgstev = bits[i];
2185 pgstev = pgstev << 24;
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002186 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002187 set_pgste_bits(kvm->mm, hva, mask, pgstev);
2188 }
2189 srcu_read_unlock(&kvm->srcu, srcu_idx);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002190 mmap_read_unlock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002191
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002192 if (!kvm->mm->context.uses_cmm) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002193 mmap_write_lock(kvm->mm);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002194 kvm->mm->context.uses_cmm = 1;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002195 mmap_write_unlock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002196 }
2197out:
2198 vfree(bits);
2199 return r;
2200}
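
/*
 * Illustrative sketch (not part of the original file): the matching restore
 * on the target side. The mask picks which PGSTE bits of each value to
 * apply; ~0 requests all of them, which the kernel narrows to
 * _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT as seen above.
 */
#if 0	/* example only */
static int restore_cmma(int vm_fd, __u64 start_gfn, __u32 count, __u8 *buf)
{
	struct kvm_s390_cmma_log log = {
		.start_gfn = start_gfn,
		.count = count,
		.mask = ~0ULL,
		.values = (__u64)(unsigned long)buf,
	};

	return ioctl(vm_fd, KVM_S390_SET_CMMA_BITS, &log);
}
#endif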
2201
Janosch Frank29b40f12019-09-30 04:19:18 -04002202static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
2203{
2204 struct kvm_vcpu *vcpu;
2205 u16 rc, rrc;
2206 int ret = 0;
2207 int i;
2208
2209 /*
2210 * We ignore failures and try to destroy as many CPUs as possible.
2211 * At the same time we must not free the assigned resources when
                                     2212	 * this fails, as the ultravisor still has access to that memory.
                                     2213	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
                                     2214	 * behind.
                                     2215	 * We want to return the rc and rrc of the first failure, though.
2216 */
2217 kvm_for_each_vcpu(i, vcpu, kvm) {
2218 mutex_lock(&vcpu->mutex);
2219 if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
2220 *rcp = rc;
2221 *rrcp = rrc;
2222 ret = -EIO;
2223 }
2224 mutex_unlock(&vcpu->mutex);
2225 }
2226 return ret;
2227}
2228
2229static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2230{
2231 int i, r = 0;
2232 u16 dummy;
2233
2234 struct kvm_vcpu *vcpu;
2235
2236 kvm_for_each_vcpu(i, vcpu, kvm) {
2237 mutex_lock(&vcpu->mutex);
2238 r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2239 mutex_unlock(&vcpu->mutex);
2240 if (r)
2241 break;
2242 }
2243 if (r)
2244 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2245 return r;
2246}
2247
2248static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2249{
2250 int r = 0;
2251 u16 dummy;
2252 void __user *argp = (void __user *)cmd->data;
2253
2254 switch (cmd->cmd) {
2255 case KVM_PV_ENABLE: {
2256 r = -EINVAL;
2257 if (kvm_s390_pv_is_protected(kvm))
2258 break;
2259
2260 /*
                                     2261		 * FMT 4 SIE needs an ESCA. As we never switch back from ESCA
                                     2262		 * to BSCA, no cleanup is needed in the error cases below.
2263 */
2264 r = sca_switch_to_extended(kvm);
2265 if (r)
2266 break;
2267
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002268 mmap_write_lock(current->mm);
Janosch Frankfa0c5ea2019-07-16 13:08:37 +02002269 r = gmap_mark_unmergeable();
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002270 mmap_write_unlock(current->mm);
Janosch Frankfa0c5ea2019-07-16 13:08:37 +02002271 if (r)
2272 break;
2273
Janosch Frank29b40f12019-09-30 04:19:18 -04002274 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2275 if (r)
2276 break;
2277
2278 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2279 if (r)
2280 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
Christian Borntraeger0890dde2020-02-03 09:13:37 +01002281
2282 /* we need to block service interrupts from now on */
2283 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
Janosch Frank29b40f12019-09-30 04:19:18 -04002284 break;
2285 }
2286 case KVM_PV_DISABLE: {
2287 r = -EINVAL;
2288 if (!kvm_s390_pv_is_protected(kvm))
2289 break;
2290
2291 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2292 /*
2293 * If a CPU could not be destroyed, destroy VM will also fail.
2294 * There is no point in trying to destroy it. Instead return
2295 * the rc and rrc from the first CPU that failed destroying.
2296 */
2297 if (r)
2298 break;
2299 r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
Christian Borntraeger0890dde2020-02-03 09:13:37 +01002300
2301 /* no need to block service interrupts any more */
2302 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
Janosch Frank29b40f12019-09-30 04:19:18 -04002303 break;
2304 }
2305 case KVM_PV_SET_SEC_PARMS: {
2306 struct kvm_s390_pv_sec_parm parms = {};
2307 void *hdr;
2308
2309 r = -EINVAL;
2310 if (!kvm_s390_pv_is_protected(kvm))
2311 break;
2312
2313 r = -EFAULT;
2314 if (copy_from_user(&parms, argp, sizeof(parms)))
2315 break;
2316
2317 /* Currently restricted to 8KB */
2318 r = -EINVAL;
2319 if (parms.length > PAGE_SIZE * 2)
2320 break;
2321
2322 r = -ENOMEM;
2323 hdr = vmalloc(parms.length);
2324 if (!hdr)
2325 break;
2326
2327 r = -EFAULT;
2328 if (!copy_from_user(hdr, (void __user *)parms.origin,
2329 parms.length))
2330 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2331 &cmd->rc, &cmd->rrc);
2332
2333 vfree(hdr);
2334 break;
2335 }
2336 case KVM_PV_UNPACK: {
2337 struct kvm_s390_pv_unp unp = {};
2338
2339 r = -EINVAL;
Janosch Frank1ed576a2020-10-20 06:12:07 -04002340 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
Janosch Frank29b40f12019-09-30 04:19:18 -04002341 break;
2342
2343 r = -EFAULT;
2344 if (copy_from_user(&unp, argp, sizeof(unp)))
2345 break;
2346
2347 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2348 &cmd->rc, &cmd->rrc);
2349 break;
2350 }
2351 case KVM_PV_VERIFY: {
2352 r = -EINVAL;
2353 if (!kvm_s390_pv_is_protected(kvm))
2354 break;
2355
2356 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2357 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2358 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2359 cmd->rrc);
2360 break;
2361 }
Janosch Franke0d27732019-05-09 13:07:21 +02002362 case KVM_PV_PREP_RESET: {
2363 r = -EINVAL;
2364 if (!kvm_s390_pv_is_protected(kvm))
2365 break;
2366
2367 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2368 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2369 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2370 cmd->rc, cmd->rrc);
2371 break;
2372 }
2373 case KVM_PV_UNSHARE_ALL: {
2374 r = -EINVAL;
2375 if (!kvm_s390_pv_is_protected(kvm))
2376 break;
2377
2378 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2379 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2380 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2381 cmd->rc, cmd->rrc);
2382 break;
2383 }
Janosch Frank29b40f12019-09-30 04:19:18 -04002384 default:
2385 r = -ENOTTY;
2386 }
2387 return r;
2388}
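
/*
 * Illustrative sketch (not part of the original file): the order in which a
 * VMM typically drives the commands handled above when booting a protected
 * guest -- ENABLE, SET_SEC_PARMS, one UNPACK per image chunk, then VERIFY.
 * The rc/rrc fields carry the ultravisor codes on failure; reporting and
 * error handling abridged. Assumes the <linux/kvm.h> UAPI.
 */
#if 0	/* example only */
static int pv_cmd(int vm_fd, __u32 cmd, __u64 data)
{
	struct kvm_pv_cmd pv = { .cmd = cmd, .data = data };

	return ioctl(vm_fd, KVM_S390_PV_COMMAND, &pv);
}

static void pv_boot_sequence(int vm_fd, struct kvm_s390_pv_sec_parm *parms,
			     struct kvm_s390_pv_unp *unp)
{
	pv_cmd(vm_fd, KVM_PV_ENABLE, 0);
	pv_cmd(vm_fd, KVM_PV_SET_SEC_PARMS, (__u64)(unsigned long)parms);
	pv_cmd(vm_fd, KVM_PV_UNPACK, (__u64)(unsigned long)unp); /* per chunk */
	pv_cmd(vm_fd, KVM_PV_VERIFY, 0);
}
#endif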
2389
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002390long kvm_arch_vm_ioctl(struct file *filp,
2391 unsigned int ioctl, unsigned long arg)
2392{
2393 struct kvm *kvm = filp->private_data;
2394 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +02002395 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002396 int r;
2397
2398 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01002399 case KVM_S390_INTERRUPT: {
2400 struct kvm_s390_interrupt s390int;
2401
2402 r = -EFAULT;
2403 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2404 break;
2405 r = kvm_s390_inject_vm(kvm, &s390int);
2406 break;
2407 }
Cornelia Huck84223592013-07-15 13:36:01 +02002408 case KVM_CREATE_IRQCHIP: {
2409 struct kvm_irq_routing_entry routing;
2410
2411 r = -EINVAL;
2412 if (kvm->arch.use_irqchip) {
2413 /* Set up dummy routing. */
2414 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -04002415 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +02002416 }
2417 break;
2418 }
Dominik Dingelf2061652014-04-09 13:13:00 +02002419 case KVM_SET_DEVICE_ATTR: {
2420 r = -EFAULT;
2421 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2422 break;
2423 r = kvm_s390_vm_set_attr(kvm, &attr);
2424 break;
2425 }
2426 case KVM_GET_DEVICE_ATTR: {
2427 r = -EFAULT;
2428 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2429 break;
2430 r = kvm_s390_vm_get_attr(kvm, &attr);
2431 break;
2432 }
2433 case KVM_HAS_DEVICE_ATTR: {
2434 r = -EFAULT;
2435 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2436 break;
2437 r = kvm_s390_vm_has_attr(kvm, &attr);
2438 break;
2439 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -04002440 case KVM_S390_GET_SKEYS: {
2441 struct kvm_s390_skeys args;
2442
2443 r = -EFAULT;
2444 if (copy_from_user(&args, argp,
2445 sizeof(struct kvm_s390_skeys)))
2446 break;
2447 r = kvm_s390_get_skeys(kvm, &args);
2448 break;
2449 }
2450 case KVM_S390_SET_SKEYS: {
2451 struct kvm_s390_skeys args;
2452
2453 r = -EFAULT;
2454 if (copy_from_user(&args, argp,
2455 sizeof(struct kvm_s390_skeys)))
2456 break;
2457 r = kvm_s390_set_skeys(kvm, &args);
2458 break;
2459 }
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002460 case KVM_S390_GET_CMMA_BITS: {
2461 struct kvm_s390_cmma_log args;
2462
2463 r = -EFAULT;
2464 if (copy_from_user(&args, argp, sizeof(args)))
2465 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002466 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002467 r = kvm_s390_get_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002468 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002469 if (!r) {
2470 r = copy_to_user(argp, &args, sizeof(args));
2471 if (r)
2472 r = -EFAULT;
2473 }
2474 break;
2475 }
2476 case KVM_S390_SET_CMMA_BITS: {
2477 struct kvm_s390_cmma_log args;
2478
2479 r = -EFAULT;
2480 if (copy_from_user(&args, argp, sizeof(args)))
2481 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002482 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002483 r = kvm_s390_set_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002484 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002485 break;
2486 }
Janosch Frank29b40f12019-09-30 04:19:18 -04002487 case KVM_S390_PV_COMMAND: {
2488 struct kvm_pv_cmd args;
2489
Eric Farman67cf68b2021-10-08 22:31:12 +02002490		/* protected virt implies user-controlled cpu state */
2491 kvm_s390_set_user_cpu_state_ctrl(kvm);
Janosch Frank29b40f12019-09-30 04:19:18 -04002492 r = 0;
2493 if (!is_prot_virt_host()) {
2494 r = -EINVAL;
2495 break;
2496 }
2497 if (copy_from_user(&args, argp, sizeof(args))) {
2498 r = -EFAULT;
2499 break;
2500 }
2501 if (args.flags) {
2502 r = -EINVAL;
2503 break;
2504 }
2505 mutex_lock(&kvm->lock);
2506 r = kvm_s390_handle_pv(kvm, &args);
2507 mutex_unlock(&kvm->lock);
2508 if (copy_to_user(argp, &args, sizeof(args))) {
2509 r = -EFAULT;
2510 break;
2511 }
2512 break;
2513 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002514 default:
Avi Kivity367e1312009-08-26 14:57:07 +03002515 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002516 }
2517
2518 return r;
2519}
2520
Tony Krowiak45c9b472015-01-13 11:33:26 -05002521static int kvm_s390_apxa_installed(void)
2522{
Tony Krowiake585b242018-09-25 19:16:18 -04002523 struct ap_config_info info;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002524
Tony Krowiake585b242018-09-25 19:16:18 -04002525 if (ap_instructions_available()) {
2526 if (ap_qci(&info) == 0)
2527 return info.apxa;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002528 }
2529
2530 return 0;
2531}
2532
Tony Krowiake585b242018-09-25 19:16:18 -04002533/*
2534 * The format of the crypto control block (CRYCB) is specified in the 3 low
2535 * order bits of the CRYCB designation (CRYCBD) field as follows:
2536 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
2537 * AP extended addressing (APXA) facility are installed.
2538 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
                                     2539 * Format 2: Both the APXA and MSAX3 facilities are installed.
2540 */
Tony Krowiak45c9b472015-01-13 11:33:26 -05002541static void kvm_s390_set_crycb_format(struct kvm *kvm)
2542{
2543 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2544
Tony Krowiake585b242018-09-25 19:16:18 -04002545 /* Clear the CRYCB format bits - i.e., set format 0 by default */
2546 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2547
2548 /* Check whether MSAX3 is installed */
2549 if (!test_kvm_facility(kvm, 76))
2550 return;
2551
Tony Krowiak45c9b472015-01-13 11:33:26 -05002552 if (kvm_s390_apxa_installed())
2553 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2554 else
2555 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2556}
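
/*
 * Illustrative sketch (not part of the original file): the format decision
 * above, condensed. Only the two low CRYCBD bits vary; the rest of the
 * word is the CRYCB origin address.
 */
#if 0	/* example only */
static __u32 crycb_format(int msax3, int apxa)
{
	if (!msax3)
		return CRYCB_FORMAT0;	/* neither MSAX3 nor APXA in use */
	return apxa ? CRYCB_FORMAT2 : CRYCB_FORMAT1;
}
#endif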
2557
Tony Krowiak86956e72021-08-23 17:20:47 -04002558/*
2559 * kvm_arch_crypto_set_masks
2560 *
2561 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
2562 * to be set.
2563 * @apm: the mask identifying the accessible AP adapters
2564 * @aqm: the mask identifying the accessible AP domains
2565 * @adm: the mask identifying the accessible AP control domains
2566 *
2567 * Set the masks that identify the adapters, domains and control domains to
2568 * which the KVM guest is granted access.
2569 *
2570 * Note: The kvm->lock mutex must be locked by the caller before invoking this
2571 * function.
2572 */
Pierre Morel0e237e42018-10-05 10:31:09 +02002573void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
2574 unsigned long *aqm, unsigned long *adm)
2575{
2576 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2577
Pierre Morel0e237e42018-10-05 10:31:09 +02002578 kvm_s390_vcpu_block_all(kvm);
2579
2580 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2581	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
2582 memcpy(crycb->apcb1.apm, apm, 32);
2583 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
2584 apm[0], apm[1], apm[2], apm[3]);
2585 memcpy(crycb->apcb1.aqm, aqm, 32);
2586 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
2587 aqm[0], aqm[1], aqm[2], aqm[3]);
2588 memcpy(crycb->apcb1.adm, adm, 32);
2589 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
2590 adm[0], adm[1], adm[2], adm[3]);
2591 break;
2592 case CRYCB_FORMAT1:
2593	case CRYCB_FORMAT0: /* Fall through: both use APCB0 */
2594 memcpy(crycb->apcb0.apm, apm, 8);
2595 memcpy(crycb->apcb0.aqm, aqm, 2);
2596 memcpy(crycb->apcb0.adm, adm, 2);
2597 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
2598 apm[0], *((unsigned short *)aqm),
2599 *((unsigned short *)adm));
2600 break;
2601	default: /* Cannot happen */
2602 break;
2603 }
2604
2605 /* recreate the shadow crycb for each vcpu */
2606 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2607 kvm_s390_vcpu_unblock_all(kvm);
Pierre Morel0e237e42018-10-05 10:31:09 +02002608}
2609EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
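/*
 * Hypothetical usage sketch (names and values are illustrative only,
 * not taken from an in-tree caller): a driver granting a guest adapters
 * 3 and 5 on domain 0 would build inverted-bit-order masks and call the
 * setter under kvm->lock, as the comment above requires:
 *
 *	DECLARE_BITMAP(apm, 256) = { 0 };
 *	DECLARE_BITMAP(aqm, 256) = { 0 };
 *	DECLARE_BITMAP(adm, 256) = { 0 };
 *
 *	set_bit_inv(3, apm);
 *	set_bit_inv(5, apm);
 *	set_bit_inv(0, aqm);
 *
 *	mutex_lock(&kvm->lock);
 *	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);
 *	mutex_unlock(&kvm->lock);
 */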
2610
Tony Krowiak86956e72021-08-23 17:20:47 -04002611/*
2612 * kvm_arch_crypto_clear_masks
2613 *
2614 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
2615 * to be cleared.
2616 *
2617 * Clear the masks that identify the adapters, domains and control domains to
2618 * which the KVM guest is granted access.
2619 *
2620 * Note: The kvm->lock mutex must be locked by the caller before invoking this
2621 * function.
2622 */
Tony Krowiak421045982018-09-25 19:16:25 -04002623void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2624{
Tony Krowiak421045982018-09-25 19:16:25 -04002625 kvm_s390_vcpu_block_all(kvm);
2626
2627 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2628 sizeof(kvm->arch.crypto.crycb->apcb0));
2629 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2630 sizeof(kvm->arch.crypto.crycb->apcb1));
2631
Pierre Morel0e237e42018-10-05 10:31:09 +02002632 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
Pierre Morel6cc571b2018-09-25 19:16:30 -04002633 /* recreate the shadow crycb for each vcpu */
2634 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
Tony Krowiak421045982018-09-25 19:16:25 -04002635 kvm_s390_vcpu_unblock_all(kvm);
Tony Krowiak421045982018-09-25 19:16:25 -04002636}
2637EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2638
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002639static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002640{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002641 struct cpuid cpuid;
2642
2643 get_cpu_id(&cpuid);
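	/* a version byte of 0xff marks the CPUID as that of a virtual CPU */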
2644 cpuid.version = 0xff;
2645 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002646}
2647
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002648static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002649{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002650 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002651 kvm_s390_set_crycb_format(kvm);
Tony Krowiak1e753732021-08-23 17:20:46 -04002652 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002653
Tony Krowiake585b242018-09-25 19:16:18 -04002654 if (!test_kvm_facility(kvm, 76))
2655 return;
2656
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002657 /* Enable AES/DEA protected key functions by default */
2658 kvm->arch.crypto.aes_kw = 1;
2659 kvm->arch.crypto.dea_kw = 1;
2660 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2661 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2662 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2663 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002664}
2665
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002666static void sca_dispose(struct kvm *kvm)
2667{
2668 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002669 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002670 else
2671 free_page((unsigned long)(kvm->arch.sca));
2672 kvm->arch.sca = NULL;
2673}
2674
Carsten Ottee08b9632012-01-04 10:25:20 +01002675int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002676{
Christian Borntraegerc4196212020-11-06 08:34:23 +01002677 gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002678 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002679 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002680 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002681
Carsten Ottee08b9632012-01-04 10:25:20 +01002682 rc = -EINVAL;
2683#ifdef CONFIG_KVM_S390_UCONTROL
2684 if (type & ~KVM_VM_S390_UCONTROL)
2685 goto out_err;
2686 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2687 goto out_err;
2688#else
2689 if (type)
2690 goto out_err;
2691#endif
2692
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002693 rc = s390_enable_sie();
2694 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002695 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002696
Carsten Otteb2904112011-10-18 12:27:13 +02002697 rc = -ENOMEM;
2698
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002699 if (!sclp.has_64bscao)
2700 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002701 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002702 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002703 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002704 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002705 goto out_err;
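	/*
	 * Stagger the SCA origin of successive VMs within the page in
	 * steps of 16 bytes, wrapping before the block would cross a page
	 * boundary; apparently this spreads cache pressure when many
	 * small VMs are created back to back.
	 */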
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002706 mutex_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002707 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002708 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002709 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002710 kvm->arch.sca = (struct bsca_block *)
2711 ((char *) kvm->arch.sca + sca_offset);
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002712 mutex_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002713
2714 sprintf(debug_name, "kvm-%u", current->pid);
2715
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002716 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002717 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002718 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002719
Michael Mueller19114be2017-05-30 14:26:02 +02002720 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002721 kvm->arch.sie_page2 =
Christian Borntraegerc4196212020-11-06 08:34:23 +01002722 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002723 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002724 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002725
Michael Mueller25c84db2019-01-31 09:52:41 +01002726 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002727 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002728
2729 for (i = 0; i < kvm_s390_fac_size(); i++) {
Sven Schnelle17e89e12021-05-05 22:01:10 +02002730 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002731 (kvm_s390_fac_base[i] |
2732 kvm_s390_fac_ext[i]);
Sven Schnelle17e89e12021-05-05 22:01:10 +02002733 kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002734 kvm_s390_fac_base[i];
2735 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002736 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002737
David Hildenbrand19352222017-08-29 16:31:08 +02002738 /* we are always in czam mode - even on pre z14 machines */
2739 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2740 set_kvm_facility(kvm->arch.model.fac_list, 138);
2741 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002742 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2743 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002744 if (MACHINE_HAS_TLB_GUEST) {
2745 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2746 set_kvm_facility(kvm->arch.model.fac_list, 147);
2747 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002748
Pierre Morel05f31e32019-05-21 17:34:37 +02002749 if (css_general_characteristics.aiv && test_facility(65))
2750 set_kvm_facility(kvm->arch.model.fac_mask, 65);
2751
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002752 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002753 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002754
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002755 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002756
Fei Li51978392017-02-17 17:06:26 +08002757 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002758 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002759 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2760 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002761 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002762 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002763
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002764 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002765 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002766
Carsten Ottee08b9632012-01-04 10:25:20 +01002767 if (type & KVM_VM_S390_UCONTROL) {
2768 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002769 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002770 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002771 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002772 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002773 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002774 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002775 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002776 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002777 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002778 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002779 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002780 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002781 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002782
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002783 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002784 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002785 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002786 kvm_s390_vsie_init(kvm);
Michael Muellercc674ef2020-02-27 10:10:31 +01002787 if (use_gisa)
2788 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002789 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002790
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002791 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002792out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002793 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002794 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002795 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002796 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002797 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002798}
2799
Christian Borntraegerd329c032008-11-26 14:50:27 +01002800void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2801{
Janosch Frank29b40f12019-09-30 04:19:18 -04002802 u16 rc, rrc;
2803
Christian Borntraegerd329c032008-11-26 14:50:27 +01002804 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002805 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002806 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002807 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002808 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002809 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002810
2811 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002812 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002813
Dominik Dingele6db1d62015-05-07 15:41:57 +02002814 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002815 kvm_s390_vcpu_unsetup_cmma(vcpu);
Janosch Frank29b40f12019-09-30 04:19:18 -04002816	/* We cannot hold the vcpu mutex here, we are already dying */
2817 if (kvm_s390_pv_cpu_get_handle(vcpu))
2818 kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002819 free_page((unsigned long)(vcpu->arch.sie_block));
Christian Borntraegerd329c032008-11-26 14:50:27 +01002820}
2821
2822static void kvm_free_vcpus(struct kvm *kvm)
2823{
2824 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002825 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002826
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002827 kvm_for_each_vcpu(i, vcpu, kvm)
Sean Christopherson4543bdc2019-12-18 13:55:14 -08002828 kvm_vcpu_destroy(vcpu);
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002829
2830 mutex_lock(&kvm->lock);
2831 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2832 kvm->vcpus[i] = NULL;
2833
2834 atomic_set(&kvm->online_vcpus, 0);
2835 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002836}
2837
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002838void kvm_arch_destroy_vm(struct kvm *kvm)
2839{
Janosch Frank29b40f12019-09-30 04:19:18 -04002840 u16 rc, rrc;
2841
Christian Borntraegerd329c032008-11-26 14:50:27 +01002842 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002843 sca_dispose(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002844 kvm_s390_gisa_destroy(kvm);
Janosch Frank29b40f12019-09-30 04:19:18 -04002845 /*
2846 * We are already at the end of life and kvm->lock is not taken.
2847 * This is ok as the file descriptor is closed by now and nobody
2848 * can mess with the pv state. To avoid lockdep_assert_held from
2849 * complaining we do not use kvm_s390_pv_is_protected.
2850 */
2851 if (kvm_s390_pv_get_handle(kvm))
2852 kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
2853 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002854 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002855 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002856 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002857 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002858 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002859 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002860 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002861}
2862
2863/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002864static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2865{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002866 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002867 if (!vcpu->arch.gmap)
2868 return -ENOMEM;
2869 vcpu->arch.gmap->private = vcpu->kvm;
2870
2871 return 0;
2872}
2873
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002874static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2875{
David Hildenbranda6940672016-08-08 22:39:32 +02002876 if (!kvm_s390_use_sca_entries())
2877 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002878 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002879 if (vcpu->kvm->arch.use_esca) {
2880 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002881
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002882 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002883 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002884 } else {
2885 struct bsca_block *sca = vcpu->kvm->arch.sca;
2886
2887 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002888 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002889 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002890 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002891}
2892
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002893static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002894{
David Hildenbranda6940672016-08-08 22:39:32 +02002895 if (!kvm_s390_use_sca_entries()) {
2896 struct bsca_block *sca = vcpu->kvm->arch.sca;
2897
2898 /* we still need the basic sca for the ipte control */
2899 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2900 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002901 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002902 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002903 read_lock(&vcpu->kvm->arch.sca_lock);
2904 if (vcpu->kvm->arch.use_esca) {
2905 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002906
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002907 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002908 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2909 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002910 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002911 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002912 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002913 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002914
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002915 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002916 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2917 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002918 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002919 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002920 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002921}
2922
2923/* Basic SCA to Extended SCA data copy routines */
2924static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2925{
2926 d->sda = s->sda;
2927 d->sigp_ctrl.c = s->sigp_ctrl.c;
2928 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2929}
2930
2931static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2932{
2933 int i;
2934
2935 d->ipte_control = s->ipte_control;
2936 d->mcn[0] = s->mcn;
2937 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2938 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2939}
2940
2941static int sca_switch_to_extended(struct kvm *kvm)
2942{
2943 struct bsca_block *old_sca = kvm->arch.sca;
2944 struct esca_block *new_sca;
2945 struct kvm_vcpu *vcpu;
2946 unsigned int vcpu_idx;
2947 u32 scaol, scaoh;
2948
Janosch Frank29b40f12019-09-30 04:19:18 -04002949 if (kvm->arch.use_esca)
2950 return 0;
2951
Christian Borntraegerc4196212020-11-06 08:34:23 +01002952 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002953 if (!new_sca)
2954 return -ENOMEM;
2955
2956 scaoh = (u32)((u64)(new_sca) >> 32);
2957 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2958
2959 kvm_s390_vcpu_block_all(kvm);
2960 write_lock(&kvm->arch.sca_lock);
2961
2962 sca_copy_b_to_e(new_sca, old_sca);
2963
2964 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2965 vcpu->arch.sie_block->scaoh = scaoh;
2966 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002967 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002968 }
2969 kvm->arch.sca = new_sca;
2970 kvm->arch.use_esca = 1;
2971
2972 write_unlock(&kvm->arch.sca_lock);
2973 kvm_s390_vcpu_unblock_all(kvm);
2974
2975 free_page((unsigned long)old_sca);
2976
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002977 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2978 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002979 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002980}
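/*
 * The switch above is one-way and performed with all vcpus blocked:
 * every sie_block is repointed to the new ESCA (scaoh/scaol) and
 * ECB2_ESCA is set so the hardware uses the extended entry layout;
 * only then can the old basic SCA page be freed.
 */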
2981
2982static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2983{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002984 int rc;
2985
David Hildenbranda6940672016-08-08 22:39:32 +02002986 if (!kvm_s390_use_sca_entries()) {
2987 if (id < KVM_MAX_VCPUS)
2988 return true;
2989 return false;
2990 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002991 if (id < KVM_S390_BSCA_CPU_SLOTS)
2992 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002993 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002994 return false;
2995
2996 mutex_lock(&kvm->lock);
2997 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2998 mutex_unlock(&kvm->lock);
2999
3000 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02003001}
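/*
 * For reference: a basic SCA holds KVM_S390_BSCA_CPU_SLOTS (64) entries,
 * an extended SCA KVM_S390_ESCA_CPU_SLOTS (248, at the time of writing).
 * The first vcpu id that does not fit into the basic SCA triggers the
 * switch to the extended format above, provided the machine supports it.
 */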
3002
David Hildenbranddb0758b2016-02-15 09:42:25 +01003003/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3004static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3005{
3006 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01003007 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003008 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01003009 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003010}
3011
3012/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3013static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3014{
3015 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01003016 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003017 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3018 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003019 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003020}
3021
3022/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3023static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3024{
3025 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3026 vcpu->arch.cputm_enabled = true;
3027 __start_cpu_timer_accounting(vcpu);
3028}
3029
3030/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3031static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3032{
3033 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3034 __stop_cpu_timer_accounting(vcpu);
3035 vcpu->arch.cputm_enabled = false;
3036}
3037
3038static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3039{
3040 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3041 __enable_cpu_timer_accounting(vcpu);
3042 preempt_enable();
3043}
3044
3045static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3046{
3047 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3048 __disable_cpu_timer_accounting(vcpu);
3049 preempt_enable();
3050}
3051
David Hildenbrand4287f242016-02-15 09:40:12 +01003052/* set the cpu timer - may only be called from the VCPU thread itself */
3053void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3054{
David Hildenbranddb0758b2016-02-15 09:42:25 +01003055 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01003056 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003057 if (vcpu->arch.cputm_enabled)
3058 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01003059 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003060 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003061 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01003062}
3063
David Hildenbranddb0758b2016-02-15 09:42:25 +01003064/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01003065__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3066{
David Hildenbrand9c23a132016-02-17 21:53:33 +01003067 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01003068 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01003069
3070 if (unlikely(!vcpu->arch.cputm_enabled))
3071 return vcpu->arch.sie_block->cputm;
3072
David Hildenbrand9c23a132016-02-17 21:53:33 +01003073 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3074 do {
3075 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3076 /*
3077 * If the writer would ever execute a read in the critical
3078 * section, e.g. in irq context, we have a deadlock.
3079 */
3080 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3081 value = vcpu->arch.sie_block->cputm;
3082 /* if cputm_start is 0, accounting is being started/stopped */
3083 if (likely(vcpu->arch.cputm_start))
3084 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3085 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3086 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003087 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01003088}
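/*
 * Note on the retry above: masking out the low bit (seq & ~1) makes a
 * read that started while an update was in flight (odd sequence) fail
 * the retry check unconditionally, so the loop only completes once a
 * stable, even sequence has been observed.
 */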
3089
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003090void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3091{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003092
David Hildenbrand37d9df92015-03-11 16:47:33 +01003093 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003094 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01003095 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003096 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01003097 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003098}
3099
3100void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3101{
David Hildenbrand01a745a2016-02-12 20:41:56 +01003102 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01003103 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003104 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003105 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01003106 vcpu->arch.enabled_gmap = gmap_get_enabled();
3107 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003108
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003109}
3110
Dominik Dingel31928aa2014-12-04 15:47:07 +01003111void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003112{
Jason J. Herne72f25022014-11-25 09:46:02 -05003113 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02003114 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003115 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01003116 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02003117 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003118 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02003119 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01003120 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02003121 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02003122 }
David Hildenbrand6502a342016-06-21 14:19:51 +02003123 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3124 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01003125 /* make vcpu_load load the right gmap on the first trigger */
3126 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003127}
3128
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003129static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3130{
3131 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3132 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3133 return true;
3134 return false;
3135}
3136
3137static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3138{
3139 /* At least one ECC subfunction must be present */
3140 return kvm_has_pckmo_subfunc(kvm, 32) ||
3141 kvm_has_pckmo_subfunc(kvm, 33) ||
3142 kvm_has_pckmo_subfunc(kvm, 34) ||
3143 kvm_has_pckmo_subfunc(kvm, 40) ||
3144 kvm_has_pckmo_subfunc(kvm, 41);
3145
3146}
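/*
 * The bit numbers checked above appear to be the PCKMO function codes
 * added with message-security-assist extension 9: 32-34 wrap
 * ECC-P256/P384/P521 keys and 40/41 wrap Ed25519/Ed448 keys, hence
 * "at least one ECC subfunction".
 */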
3147
Tony Krowiak5102ee82014-06-27 14:46:01 -04003148static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3149{
Tony Krowiake585b242018-09-25 19:16:18 -04003150 /*
3151 * If the AP instructions are not being interpreted and the MSAX3
3152 * facility is not configured for the guest, there is nothing to set up.
3153 */
3154 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04003155 return;
3156
Tony Krowiake585b242018-09-25 19:16:18 -04003157 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02003158 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04003159 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003160 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
Tony Krowiaka374e892014-09-03 10:13:53 +02003161
Tony Krowiake585b242018-09-25 19:16:18 -04003162 if (vcpu->kvm->arch.crypto.apie)
3163 vcpu->arch.sie_block->eca |= ECA_APIE;
3164
3165 /* Set up protected key support */
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003166 if (vcpu->kvm->arch.crypto.aes_kw) {
Tony Krowiaka374e892014-09-03 10:13:53 +02003167 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003168 /* ecc is also wrapped with AES key */
3169 if (kvm_has_pckmo_ecc(vcpu->kvm))
3170 vcpu->arch.sie_block->ecd |= ECD_ECC;
3171 }
3172
Tony Krowiaka374e892014-09-03 10:13:53 +02003173 if (vcpu->kvm->arch.crypto.dea_kw)
3174 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04003175}
3176
Dominik Dingelb31605c2014-03-25 13:47:11 +01003177void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3178{
3179 free_page(vcpu->arch.sie_block->cbrlo);
3180 vcpu->arch.sie_block->cbrlo = 0;
3181}
3182
3183int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3184{
Christian Borntraegerc4196212020-11-06 08:34:23 +01003185 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT);
Dominik Dingelb31605c2014-03-25 13:47:11 +01003186 if (!vcpu->arch.sie_block->cbrlo)
3187 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01003188 return 0;
3189}
3190
Michael Mueller91520f12015-02-27 14:32:11 +01003191static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3192{
3193 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3194
Michael Mueller91520f12015-02-27 14:32:11 +01003195 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01003196 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01003197 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01003198}
3199
Sean Christophersonff72bb52019-12-18 13:55:20 -08003200static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3201{
Dominik Dingelb31605c2014-03-25 13:47:11 +01003202 int rc = 0;
Janosch Frank29b40f12019-09-30 04:19:18 -04003203 u16 uvrc, uvrrc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003204
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01003205 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3206 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003207 CPUSTAT_STOPPED);
3208
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003209 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003210 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003211 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003212 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003213
Michael Mueller91520f12015-02-27 14:32:11 +01003214 kvm_s390_vcpu_setup_model(vcpu);
3215
David Hildenbrandbdab09f2016-04-12 11:07:49 +02003216 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3217 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003218 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01003219 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003220 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02003221 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003222 vcpu->arch.sie_block->ecb |= ECB_TE;
Janis Schoetterl-Glausch7119dec2021-06-29 10:55:30 +02003223 if (!kvm_is_ucontrol(vcpu->kvm))
3224 vcpu->arch.sie_block->ecb |= ECB_SPECI;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003225
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003226 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003227 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02003228 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003229 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3230 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02003231 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003232 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02003233 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003234 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003235 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003236 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003237 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003238 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01003239 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003240 vcpu->arch.sie_block->eca |= ECA_VX;
3241 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04003242 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003243 if (test_kvm_facility(vcpu->kvm, 139))
3244 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003245 if (test_kvm_facility(vcpu->kvm, 156))
3246 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02003247 if (vcpu->arch.sie_block->gd) {
3248 vcpu->arch.sie_block->eca |= ECA_AIV;
3249 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3250 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3251 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003252 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3253 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08003254 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05003255
3256 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003257 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05003258 else
3259 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05003260
Dominik Dingele6db1d62015-05-07 15:41:57 +02003261 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01003262 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3263 if (rc)
3264 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003265 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01003266 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02003267 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01003268
Collin Walling67d49d52018-08-31 12:51:19 -04003269 vcpu->arch.sie_block->hpid = HPID_KVM;
3270
Tony Krowiak5102ee82014-06-27 14:46:01 -04003271 kvm_s390_vcpu_crypto_setup(vcpu);
3272
Janosch Frank29b40f12019-09-30 04:19:18 -04003273 mutex_lock(&vcpu->kvm->lock);
3274 if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3275 rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3276 if (rc)
3277 kvm_s390_vcpu_unsetup_cmma(vcpu);
3278 }
3279 mutex_unlock(&vcpu->kvm->lock);
3280
Dominik Dingelb31605c2014-03-25 13:47:11 +01003281 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003282}
3283
Sean Christopherson897cc382019-12-18 13:55:09 -08003284int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3285{
3286 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3287 return -EINVAL;
3288 return 0;
3289}
3290
Sean Christophersone529ef62019-12-18 13:55:15 -08003291int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003292{
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003293 struct sie_page *sie_page;
Sean Christopherson897cc382019-12-18 13:55:09 -08003294 int rc;
Carsten Otte4d475552011-10-18 12:27:12 +02003295
QingFeng Haoda72ca42017-06-07 11:41:19 +02003296 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Christian Borntraegerc4196212020-11-06 08:34:23 +01003297 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003298 if (!sie_page)
Sean Christophersone529ef62019-12-18 13:55:15 -08003299 return -ENOMEM;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003300
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003301 vcpu->arch.sie_block = &sie_page->sie_block;
3302 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3303
David Hildenbrandefed1102015-04-16 12:32:41 +02003304 /* the real guest size will always be smaller than msl */
3305 vcpu->arch.sie_block->mso = 0;
3306 vcpu->arch.sie_block->msl = sclp.hamax;
3307
Sean Christophersone529ef62019-12-18 13:55:15 -08003308 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003309 spin_lock_init(&vcpu->arch.local_int.lock);
Sean Christophersone529ef62019-12-18 13:55:15 -08003310 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02003311 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3312 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003313 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01003314
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003315 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3316 kvm_clear_async_pf_completion_queue(vcpu);
3317 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3318 KVM_SYNC_GPRS |
3319 KVM_SYNC_ACRS |
3320 KVM_SYNC_CRS |
3321 KVM_SYNC_ARCH0 |
Collin Walling23a60f82020-06-22 11:46:36 -04003322 KVM_SYNC_PFAULT |
3323 KVM_SYNC_DIAG318;
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003324 kvm_s390_set_prefix(vcpu, 0);
3325 if (test_kvm_facility(vcpu->kvm, 64))
3326 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3327 if (test_kvm_facility(vcpu->kvm, 82))
3328 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3329 if (test_kvm_facility(vcpu->kvm, 133))
3330 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3331 if (test_kvm_facility(vcpu->kvm, 156))
3332 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3333 /* fprs can be synchronized via vrs, even if the guest has no vx. With
3334 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3335 */
3336 if (MACHINE_HAS_VX)
3337 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3338 else
3339 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3340
3341 if (kvm_is_ucontrol(vcpu->kvm)) {
3342 rc = __kvm_ucontrol_vcpu_init(vcpu);
3343 if (rc)
Sean Christophersona2017f12019-12-18 13:55:11 -08003344 goto out_free_sie_block;
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003345 }
3346
Sean Christophersone529ef62019-12-18 13:55:15 -08003347 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3348 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3349 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003350
Sean Christophersonff72bb52019-12-18 13:55:20 -08003351 rc = kvm_s390_vcpu_setup(vcpu);
3352 if (rc)
3353 goto out_ucontrol_uninit;
Sean Christophersone529ef62019-12-18 13:55:15 -08003354 return 0;
3355
Sean Christophersonff72bb52019-12-18 13:55:20 -08003356out_ucontrol_uninit:
3357 if (kvm_is_ucontrol(vcpu->kvm))
3358 gmap_remove(vcpu->arch.gmap);
Wei Yongjun7b06bf22010-03-09 14:37:53 +08003359out_free_sie_block:
3360 free_page((unsigned long)(vcpu->arch.sie_block));
Sean Christophersone529ef62019-12-18 13:55:15 -08003361 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003362}
3363
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003364int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3365{
Halil Pasic9b57e9d2021-10-19 19:53:59 +02003366 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
David Hildenbrand9a022062014-08-05 17:40:47 +02003367 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003368}
3369
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003370bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3371{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003372 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003373}
3374
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003375void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003376{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003377 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003378 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003379}
3380
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003381void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003382{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003383 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003384}
3385
Christian Borntraeger8e236542015-04-09 13:49:04 +02003386static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3387{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003388 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003389 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003390}
3391
David Hildenbrand9ea59722018-09-25 19:16:16 -04003392bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3393{
3394 return atomic_read(&vcpu->arch.sie_block->prog20) &
3395 (PROG_BLOCK_SIE | PROG_REQUEST);
3396}
3397
Christian Borntraeger8e236542015-04-09 13:49:04 +02003398static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3399{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003400 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003401}
3402
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003403/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003404 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003405 * If the CPU is not running (e.g. waiting as idle), the function will
3406 * return immediately. */
3407void exit_sie(struct kvm_vcpu *vcpu)
3408{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003409 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003410 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003411 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3412 cpu_relax();
3413}
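/*
 * exit_sie() works in two steps: the STOP-interrupt request makes the
 * SIE instruction exit at the next interception point, and the busy
 * wait on PROG_IN_SIE (maintained in prog0c by the SIE entry/exit
 * path) guarantees the vcpu has really left SIE before returning.
 */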
3414
Christian Borntraeger8e236542015-04-09 13:49:04 +02003415/* Kick a guest cpu out of SIE to process a request synchronously */
3416void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003417{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003418 kvm_make_request(req, vcpu);
3419 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003420}
3421
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003422static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3423 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003424{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003425 struct kvm *kvm = gmap->private;
3426 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003427 unsigned long prefix;
3428 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003429
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02003430 if (gmap_is_shadow(gmap))
3431 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003432 if (start >= 1UL << 31)
3433 /* We are only interested in prefix pages */
3434 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003435 kvm_for_each_vcpu(i, vcpu, kvm) {
3436 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003437 prefix = kvm_s390_get_prefix(vcpu);
3438 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3439 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3440 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003441 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003442 }
3443 }
3444}
3445
Christian Borntraeger8b905d22019-03-05 05:30:02 -05003446bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3447{
3448 /* do not poll with more than halt_poll_max_steal percent of steal time */
3449 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3450 halt_poll_max_steal) {
3451 vcpu->stat.halt_no_poll_steal++;
3452 return true;
3453 }
3454 return false;
3455}
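/*
 * Worked example for the check above: avg_steal_timer is in CPU-timer
 * units (4096 per microsecond, hence the << 12) accumulated per tick,
 * so the expression yields steal time as a percentage of a tick. With
 * halt_poll_max_steal at its default (10), polling is skipped once
 * more than about 10% of the elapsed time was stolen.
 */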
3456
Christoffer Dallb6d33832012-03-08 16:44:24 -05003457int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3458{
3459 /* kvm common code refers to this, but never calls it */
3460 BUG();
3461 return 0;
3462}
3463
Carsten Otte14eebd92012-05-15 14:15:26 +02003464static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3465 struct kvm_one_reg *reg)
3466{
3467 int r = -EINVAL;
3468
3469 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003470 case KVM_REG_S390_TODPR:
3471 r = put_user(vcpu->arch.sie_block->todpr,
3472 (u32 __user *)reg->addr);
3473 break;
3474 case KVM_REG_S390_EPOCHDIFF:
3475 r = put_user(vcpu->arch.sie_block->epoch,
3476 (u64 __user *)reg->addr);
3477 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003478 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003479 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02003480 (u64 __user *)reg->addr);
3481 break;
3482 case KVM_REG_S390_CLOCK_COMP:
3483 r = put_user(vcpu->arch.sie_block->ckc,
3484 (u64 __user *)reg->addr);
3485 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003486 case KVM_REG_S390_PFTOKEN:
3487 r = put_user(vcpu->arch.pfault_token,
3488 (u64 __user *)reg->addr);
3489 break;
3490 case KVM_REG_S390_PFCOMPARE:
3491 r = put_user(vcpu->arch.pfault_compare,
3492 (u64 __user *)reg->addr);
3493 break;
3494 case KVM_REG_S390_PFSELECT:
3495 r = put_user(vcpu->arch.pfault_select,
3496 (u64 __user *)reg->addr);
3497 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003498 case KVM_REG_S390_PP:
3499 r = put_user(vcpu->arch.sie_block->pp,
3500 (u64 __user *)reg->addr);
3501 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003502 case KVM_REG_S390_GBEA:
3503 r = put_user(vcpu->arch.sie_block->gbea,
3504 (u64 __user *)reg->addr);
3505 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003506 default:
3507 break;
3508 }
3509
3510 return r;
3511}
3512
3513static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3514 struct kvm_one_reg *reg)
3515{
3516 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01003517 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02003518
3519 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003520 case KVM_REG_S390_TODPR:
3521 r = get_user(vcpu->arch.sie_block->todpr,
3522 (u32 __user *)reg->addr);
3523 break;
3524 case KVM_REG_S390_EPOCHDIFF:
3525 r = get_user(vcpu->arch.sie_block->epoch,
3526 (u64 __user *)reg->addr);
3527 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003528 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003529 r = get_user(val, (u64 __user *)reg->addr);
3530 if (!r)
3531 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02003532 break;
3533 case KVM_REG_S390_CLOCK_COMP:
3534 r = get_user(vcpu->arch.sie_block->ckc,
3535 (u64 __user *)reg->addr);
3536 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003537 case KVM_REG_S390_PFTOKEN:
3538 r = get_user(vcpu->arch.pfault_token,
3539 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003540 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3541 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02003542 break;
3543 case KVM_REG_S390_PFCOMPARE:
3544 r = get_user(vcpu->arch.pfault_compare,
3545 (u64 __user *)reg->addr);
3546 break;
3547 case KVM_REG_S390_PFSELECT:
3548 r = get_user(vcpu->arch.pfault_select,
3549 (u64 __user *)reg->addr);
3550 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003551 case KVM_REG_S390_PP:
3552 r = get_user(vcpu->arch.sie_block->pp,
3553 (u64 __user *)reg->addr);
3554 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003555 case KVM_REG_S390_GBEA:
3556 r = get_user(vcpu->arch.sie_block->gbea,
3557 (u64 __user *)reg->addr);
3558 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003559 default:
3560 break;
3561 }
3562
3563 return r;
3564}
Christoffer Dallb6d33832012-03-08 16:44:24 -05003565
Janosch Frank7de3f142020-01-31 05:02:02 -05003566static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003567{
Janosch Frank7de3f142020-01-31 05:02:02 -05003568 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
3569 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3570 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
3571
3572 kvm_clear_async_pf_completion_queue(vcpu);
3573 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
3574 kvm_s390_vcpu_stop(vcpu);
3575 kvm_s390_clear_local_irqs(vcpu);
3576}
3577
3578static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3579{
3580 /* Initial reset is a superset of the normal reset */
3581 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
3582
Christian Borntraegere93fc7b2020-03-03 03:10:57 -05003583 /*
3584 * This equals initial cpu reset in pop, but we don't switch to ESA.
3585 * We do not only reset the internal data, but also ...
3586 */
Janosch Frank7de3f142020-01-31 05:02:02 -05003587 vcpu->arch.sie_block->gpsw.mask = 0;
3588 vcpu->arch.sie_block->gpsw.addr = 0;
3589 kvm_s390_set_prefix(vcpu, 0);
3590 kvm_s390_set_cpu_timer(vcpu, 0);
3591 vcpu->arch.sie_block->ckc = 0;
Janosch Frank7de3f142020-01-31 05:02:02 -05003592 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
3593 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
3594 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
Christian Borntraegere93fc7b2020-03-03 03:10:57 -05003595
3596 /* ... the data in sync regs */
3597 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
3598 vcpu->run->s.regs.ckc = 0;
3599 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
3600 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
3601 vcpu->run->psw_addr = 0;
3602 vcpu->run->psw_mask = 0;
3603 vcpu->run->s.regs.todpr = 0;
3604 vcpu->run->s.regs.cputm = 0;
3605 vcpu->run->s.regs.ckc = 0;
3606 vcpu->run->s.regs.pp = 0;
3607 vcpu->run->s.regs.gbea = 1;
Janosch Frank7de3f142020-01-31 05:02:02 -05003608 vcpu->run->s.regs.fpc = 0;
Janosch Frank0f303502020-02-10 04:27:47 -05003609 /*
3610 * Do not reset these registers in the protected case, as some of
3611 * them are overlayed and they are not accessible in this case
3612 * anyway.
3613 */
3614 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
3615 vcpu->arch.sie_block->gbea = 1;
3616 vcpu->arch.sie_block->pp = 0;
3617 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3618 vcpu->arch.sie_block->todpr = 0;
3619 }
Janosch Frank7de3f142020-01-31 05:02:02 -05003620}
3621
3622static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
3623{
3624 struct kvm_sync_regs *regs = &vcpu->run->s.regs;
3625
3626 /* Clear reset is a superset of the initial reset */
3627 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3628
3629 memset(&regs->gprs, 0, sizeof(regs->gprs));
3630 memset(&regs->vrs, 0, sizeof(regs->vrs));
3631 memset(&regs->acrs, 0, sizeof(regs->acrs));
3632 memset(&regs->gscb, 0, sizeof(regs->gscb));
3633
3634 regs->etoken = 0;
3635 regs->etoken_extension = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003636}
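/*
 * Summary of the reset hierarchy implemented above, mirroring the
 * architected CPU resets: normal reset < initial reset < clear reset,
 * where each stronger variant first performs the weaker one and then
 * clears additional state (for clear reset: gprs, vrs, acrs, gscb and
 * the etoken registers).
 */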
3637
3638int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3639{
Christoffer Dall875656f2017-12-04 21:35:27 +01003640 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003641 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Christoffer Dall875656f2017-12-04 21:35:27 +01003642 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003643 return 0;
3644}
3645
3646int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3647{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003648 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003649 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003650 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003651 return 0;
3652}
3653
3654int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3655 struct kvm_sregs *sregs)
3656{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003657 vcpu_load(vcpu);
3658
Christian Borntraeger59674c12012-01-11 11:20:33 +01003659 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003660 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003661
3662 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003663 return 0;
3664}
3665
3666int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3667 struct kvm_sregs *sregs)
3668{
Christoffer Dallbcdec412017-12-04 21:35:28 +01003669 vcpu_load(vcpu);
3670
Christian Borntraeger59674c12012-01-11 11:20:33 +01003671 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003672 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Christoffer Dallbcdec412017-12-04 21:35:28 +01003673
3674 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003675 return 0;
3676}
3677
3678int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3679{
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003680 int ret = 0;
3681
3682 vcpu_load(vcpu);
3683
3684 if (test_fp_ctl(fpu->fpc)) {
3685 ret = -EINVAL;
3686 goto out;
3687 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003688 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003689 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003690 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3691 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003692 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003693 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003694
3695out:
3696 vcpu_put(vcpu);
3697 return ret;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003698}
3699
3700int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3701{
Christoffer Dall13931232017-12-04 21:35:34 +01003702 vcpu_load(vcpu);
3703
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003704 /* make sure we have the latest values */
3705 save_fpu_regs();
3706 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003707 convert_vx_to_fp((freg_t *) fpu->fprs,
3708 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003709 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003710 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003711 fpu->fpc = vcpu->run->s.regs.fpc;
Christoffer Dall13931232017-12-04 21:35:34 +01003712
3713 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003714 return 0;
3715}
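
/*
 * Background for the conversion helpers used above: with the vector
 * facility installed, floating-point registers 0-15 overlay bits 0-63
 * of vector registers 0-15, so each conversion is one doubleword copy
 * per register. A sketch of the idea (the real helpers live in the fpu
 * headers):
 *
 *	for (i = 0; i < 16; i++)
 *		fprs[i] = *(freg_t *)(vrs + i);	// FPR i == high half of VR i
 */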
3716
3717static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3718{
3719 int rc = 0;
3720
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003721 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003722 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003723 else {
3724 vcpu->run->psw_mask = psw.mask;
3725 vcpu->run->psw_addr = psw.addr;
3726 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003727 return rc;
3728}
3729
3730int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3731 struct kvm_translation *tr)
3732{
3733 return -EINVAL; /* not implemented yet */
3734}
3735
David Hildenbrand27291e22014-01-23 12:26:52 +01003736#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3737 KVM_GUESTDBG_USE_HW_BP | \
3738 KVM_GUESTDBG_ENABLE)
3739
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003740int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3741 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003742{
David Hildenbrand27291e22014-01-23 12:26:52 +01003743 int rc = 0;
3744
Christoffer Dall66b56562017-12-04 21:35:33 +01003745 vcpu_load(vcpu);
3746
David Hildenbrand27291e22014-01-23 12:26:52 +01003747 vcpu->guest_debug = 0;
3748 kvm_s390_clear_bp_data(vcpu);
3749
Christoffer Dall66b56562017-12-04 21:35:33 +01003750 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3751 rc = -EINVAL;
3752 goto out;
3753 }
3754 if (!sclp.has_gpere) {
3755 rc = -EINVAL;
3756 goto out;
3757 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003758
3759 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3760 vcpu->guest_debug = dbg->control;
3761 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003762 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003763
3764 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3765 rc = kvm_s390_import_bp_data(vcpu, dbg);
3766 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003767 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003768 vcpu->arch.guestdbg.last_bp = 0;
3769 }
3770
3771 if (rc) {
3772 vcpu->guest_debug = 0;
3773 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003774 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003775 }
3776
Christoffer Dall66b56562017-12-04 21:35:33 +01003777out:
3778 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003779 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003780}
3781
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003782int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3783 struct kvm_mp_state *mp_state)
3784{
Christoffer Dallfd232562017-12-04 21:35:30 +01003785 int ret;
3786
3787 vcpu_load(vcpu);
3788
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003789 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003790 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3791 KVM_MP_STATE_OPERATING;
3792
3793 vcpu_put(vcpu);
3794 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003795}
3796
3797int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3798 struct kvm_mp_state *mp_state)
3799{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003800 int rc = 0;
3801
Christoffer Dalle83dff52017-12-04 21:35:31 +01003802 vcpu_load(vcpu);
3803
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003804 /* user space knows about this interface - let it control the state */
Eric Farman67cf68b2021-10-08 22:31:12 +02003805 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003806
3807 switch (mp_state->mp_state) {
3808 case KVM_MP_STATE_STOPPED:
Janosch Frankfe28c7862019-05-15 13:24:30 +02003809 rc = kvm_s390_vcpu_stop(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003810 break;
3811 case KVM_MP_STATE_OPERATING:
Janosch Frankfe28c7862019-05-15 13:24:30 +02003812 rc = kvm_s390_vcpu_start(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003813 break;
3814 case KVM_MP_STATE_LOAD:
Janosch Frank7c36a3f2019-09-02 08:34:44 +02003815 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
3816 rc = -ENXIO;
3817 break;
3818 }
3819 rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
3820 break;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003821 case KVM_MP_STATE_CHECK_STOP:
Joe Perches3b684a42020-03-10 21:51:32 -07003822 fallthrough; /* CHECK_STOP and LOAD are not supported yet */
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003823 default:
3824 rc = -ENXIO;
3825 }
3826
Christoffer Dalle83dff52017-12-04 21:35:31 +01003827 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003828 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003829}
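
/*
 * Userspace view (illustration only): the state above is driven with the
 * generic MP-state ioctls, e.g. restarting a stopped vcpu:
 *
 *	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_OPERATING };
 *
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
 *
 * Note that the first KVM_SET_MP_STATE call flips the VM into the
 * user_cpu_state_ctrl mode checked elsewhere in this file, i.e. the
 * kernel stops starting/stopping cpus on its own.
 */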
3830
David Hildenbrand8ad35752014-03-14 11:00:21 +01003831static bool ibs_enabled(struct kvm_vcpu *vcpu)
3832{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003833 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003834}
3835
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003836static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3837{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003838retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003839 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003840 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003841 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003842 /*
3843 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003844 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003845 * This ensures that the ipte instruction for this request has
3846 * already finished. We might race against a second unmapper that
3847	 * wants to set the blocking bit. Let's just retry the request loop.
3848 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003849 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003850 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003851 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3852 kvm_s390_get_prefix(vcpu),
3853 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003854 if (rc) {
3855 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003856 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003857 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003858 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003859 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003860
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003861 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3862 vcpu->arch.sie_block->ihcpu = 0xffff;
3863 goto retry;
3864 }
3865
David Hildenbrand8ad35752014-03-14 11:00:21 +01003866 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3867 if (!ibs_enabled(vcpu)) {
3868 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003869 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003870 }
3871 goto retry;
3872 }
3873
3874 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3875 if (ibs_enabled(vcpu)) {
3876 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003877 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003878 }
3879 goto retry;
3880 }
3881
David Hildenbrand6502a342016-06-21 14:19:51 +02003882 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3883 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3884 goto retry;
3885 }
3886
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003887 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3888 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003889 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003890 * instruction manually, in order to provide additional
3891		 * functionality needed for live migration.
3892 */
3893 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3894 goto retry;
3895 }
3896
3897 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3898 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003899 * Re-enable CMM virtualization if CMMA is available and
3900 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003901 */
3902 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003903 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003904 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3905 goto retry;
3906 }
3907
David Hildenbrand0759d062014-05-13 16:54:32 +02003908 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003909 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003910 /* we left the vsie handler, nothing to do, just clear the request */
3911 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003912
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003913 return 0;
3914}
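
/*
 * Note on the pattern above: every request that needed work jumps back
 * to the retry label, because handling one request (e.g. re-arming the
 * prefix notifier) may raise another; the function only falls through
 * to the final return once a full pass found nothing left to do.
 */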
3915
David Hildenbrand0e7def52018-02-07 12:46:43 +01003916void kvm_s390_set_tod_clock(struct kvm *kvm,
3917 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003918{
3919 struct kvm_vcpu *vcpu;
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003920 union tod_clock clk;
Collin L. Walling8fa16962016-07-26 15:29:44 -04003921 int i;
3922
3923 mutex_lock(&kvm->lock);
3924 preempt_disable();
3925
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003926 store_tod_clock_ext(&clk);
Collin L. Walling8fa16962016-07-26 15:29:44 -04003927
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003928 kvm->arch.epoch = gtod->tod - clk.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003929 kvm->arch.epdx = 0;
3930 if (test_kvm_facility(kvm, 139)) {
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003931 kvm->arch.epdx = gtod->epoch_idx - clk.ei;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003932 if (kvm->arch.epoch > gtod->tod)
3933 kvm->arch.epdx -= 1;
3934 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003935
3936 kvm_s390_vcpu_block_all(kvm);
3937 kvm_for_each_vcpu(i, vcpu, kvm) {
3938 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3939 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3940 }
3941
3942 kvm_s390_vcpu_unblock_all(kvm);
3943 preempt_enable();
3944 mutex_unlock(&kvm->lock);
3945}
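
/*
 * Worked example for the epoch arithmetic above: the guest TOD is
 * host_tod + epoch (mod 2^64), extended by the epoch index (epdx) on
 * machines with facility 139. If the requested guest TOD lies behind
 * the host TOD, the 64-bit subtraction wraps, e.g. with
 * gtod->tod = 0x10 and clk.tod = 0x20:
 *
 *	epoch = 0x10 - 0x20 = 0xfffffffffffffff0	(i.e. -0x10)
 *
 * The wrap is what "kvm->arch.epoch > gtod->tod" detects, and the
 * borrow is then propagated into epdx, mimicking a 128-bit subtraction.
 */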
3946
Thomas Huthfa576c52014-05-06 17:20:16 +02003947/**
3948 * kvm_arch_fault_in_page - fault-in guest page if necessary
3949 * @vcpu: The corresponding virtual cpu
3950 * @gpa: Guest physical address
3951 * @writable: Whether the page should be writable or not
3952 *
3953 * Make sure that a guest page has been faulted-in on the host.
3954 *
3955 * Return: Zero on success, negative error code otherwise.
3956 */
3957long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003958{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003959 return gmap_fault(vcpu->arch.gmap, gpa,
3960 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003961}
3962
Dominik Dingel3c038e62013-10-07 17:11:48 +02003963static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3964 unsigned long token)
3965{
3966 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003967 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003968
3969 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003970 irq.u.ext.ext_params2 = token;
3971 irq.type = KVM_S390_INT_PFAULT_INIT;
3972 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003973 } else {
3974 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003975 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003976 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3977 }
3978}
3979
Vitaly Kuznetsov2a18b7e2020-06-10 19:55:32 +02003980bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
Dominik Dingel3c038e62013-10-07 17:11:48 +02003981 struct kvm_async_pf *work)
3982{
3983 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3984 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
Vitaly Kuznetsov2a18b7e2020-06-10 19:55:32 +02003985
3986 return true;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003987}
3988
3989void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3990 struct kvm_async_pf *work)
3991{
3992 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3993 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3994}
3995
3996void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3997 struct kvm_async_pf *work)
3998{
3999 /* s390 will always inject the page directly */
4000}
4001
Vitaly Kuznetsov7c0ade62020-05-25 16:41:18 +02004002bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
Dominik Dingel3c038e62013-10-07 17:11:48 +02004003{
4004 /*
4005 * s390 will always inject the page directly,
4006	 * but we still want kvm_check_async_pf_completion() to clean up
4007 */
4008 return true;
4009}
4010
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004011static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
Dominik Dingel3c038e62013-10-07 17:11:48 +02004012{
4013 hva_t hva;
4014 struct kvm_arch_async_pf arch;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004015
4016 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004017 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004018 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
4019 vcpu->arch.pfault_compare)
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004020 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004021 if (psw_extint_disabled(vcpu))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004022 return false;
David Hildenbrand9a022062014-08-05 17:40:47 +02004023 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004024 return false;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02004025 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004026 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004027 if (!vcpu->arch.gmap->pfault_enabled)
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004028 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004029
Heiko Carstens81480cc2014-01-01 16:36:07 +01004030 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
4031 hva += current->thread.gmap_addr & ~PAGE_MASK;
4032 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004033 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004034
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004035 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
Dominik Dingel3c038e62013-10-07 17:11:48 +02004036}
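
/*
 * Summary of the async pfault flow implemented above: when a guest
 * access faults and the checks in kvm_arch_setup_async_pf() pass, the
 * fault is resolved in the background and a PFAULT_INIT external
 * interrupt carrying the guest's token is injected; once the page is
 * available, kvm_arch_async_page_present() injects the matching
 * PFAULT_DONE, so the guest can reschedule the waiting task instead of
 * having the whole vcpu block in the host.
 */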
4037
Thomas Huth3fb4c402013-09-12 10:33:43 +02004038static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004039{
Thomas Huth3fb4c402013-09-12 10:33:43 +02004040 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01004041
Dominik Dingel3c038e62013-10-07 17:11:48 +02004042 /*
4043 * On s390 notifications for arriving pages will be delivered directly
4044	 * to the guest but the housekeeping for completed pfaults is
4045 * handled outside the worker.
4046 */
4047 kvm_check_async_pf_completion(vcpu);
4048
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01004049 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4050 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004051
4052 if (need_resched())
4053 schedule();
4054
Jens Freimann79395032014-04-17 10:10:30 +02004055 if (!kvm_is_ucontrol(vcpu->kvm)) {
4056 rc = kvm_s390_deliver_pending_interrupts(vcpu);
4057 if (rc)
4058 return rc;
4059 }
Carsten Otte0ff31862008-05-21 13:37:37 +02004060
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02004061 rc = kvm_s390_handle_requests(vcpu);
4062 if (rc)
4063 return rc;
4064
David Hildenbrand27291e22014-01-23 12:26:52 +01004065 if (guestdbg_enabled(vcpu)) {
4066 kvm_s390_backup_guest_per_regs(vcpu);
4067 kvm_s390_patch_guest_per_regs(vcpu);
4068 }
4069
Sean Christopherson4eeef242021-09-10 11:32:19 -07004070 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
Michael Mueller9f30f622019-01-31 09:52:44 +01004071
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004072 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004073 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4074 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4075 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02004076
Thomas Huth3fb4c402013-09-12 10:33:43 +02004077 return 0;
4078}
4079
Thomas Huth492d8642015-02-10 16:11:01 +01004080static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
4081{
David Hildenbrand56317922016-01-12 17:37:58 +01004082 struct kvm_s390_pgm_info pgm_info = {
4083 .code = PGM_ADDRESSING,
4084 };
4085 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01004086 int rc;
4087
4088 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4089 trace_kvm_s390_sie_fault(vcpu);
4090
4091 /*
4092 * We want to inject an addressing exception, which is defined as a
4093 * suppressing or terminating exception. However, since we came here
4094 * by a DAT access exception, the PSW still points to the faulting
4095 * instruction since DAT exceptions are nullifying. So we've got
4096 * to look up the current opcode to get the length of the instruction
4097 * to be able to forward the PSW.
4098 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02004099 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01004100 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01004101 if (rc < 0) {
4102 return rc;
4103 } else if (rc) {
4104 /* Instruction-Fetching Exceptions - we can't detect the ilen.
4105 * Forward by arbitrary ilc, injection will take care of
4106 * nullification if necessary.
4107 */
4108 pgm_info = vcpu->arch.pgm;
4109 ilen = 4;
4110 }
David Hildenbrand56317922016-01-12 17:37:58 +01004111 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4112 kvm_s390_forward_psw(vcpu, ilen);
4113 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01004114}
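
/*
 * For reference: insn_length() above needs only the first opcode byte
 * because s390 encodes the instruction length in its two leftmost bits
 * (00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes). A hypothetical
 * one-liner with the same mapping:
 *
 *	ilen = ((opcode >> 6) + 3) & ~1U;
 */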
4115
Thomas Huth3fb4c402013-09-12 10:33:43 +02004116static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4117{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02004118 struct mcck_volatile_info *mcck_info;
4119 struct sie_page *sie_page;
4120
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02004121 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4122 vcpu->arch.sie_block->icptcode);
4123 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4124
David Hildenbrand27291e22014-01-23 12:26:52 +01004125 if (guestdbg_enabled(vcpu))
4126 kvm_s390_restore_guest_per_regs(vcpu);
4127
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01004128 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4129 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004130
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02004131 if (exit_reason == -EINTR) {
4132 VCPU_EVENT(vcpu, 3, "%s", "machine check");
4133 sie_page = container_of(vcpu->arch.sie_block,
4134 struct sie_page, sie_block);
4135 mcck_info = &sie_page->mcck_info;
4136 kvm_s390_reinject_machine_check(vcpu, mcck_info);
4137 return 0;
4138 }
4139
David Hildenbrand71f116b2015-10-19 16:24:28 +02004140 if (vcpu->arch.sie_block->icptcode > 0) {
4141 int rc = kvm_handle_sie_intercept(vcpu);
4142
4143 if (rc != -EOPNOTSUPP)
4144 return rc;
4145 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4146 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4147 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4148 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4149 return -EREMOTE;
4150 } else if (exit_reason != -EFAULT) {
4151 vcpu->stat.exit_null++;
4152 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02004153 } else if (kvm_is_ucontrol(vcpu->kvm)) {
4154 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4155 vcpu->run->s390_ucontrol.trans_exc_code =
4156 current->thread.gmap_addr;
4157 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004158 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004159 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02004160 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004161 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004162 if (kvm_arch_setup_async_pf(vcpu))
4163 return 0;
Christian Borntraeger50a05be2020-11-25 10:06:58 +01004164 vcpu->stat.pfault_sync++;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004165 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004166 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02004167 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004168}
4169
Janosch Frank3adae0b2019-12-13 08:26:06 -05004170#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
Thomas Huth3fb4c402013-09-12 10:33:43 +02004171static int __vcpu_run(struct kvm_vcpu *vcpu)
4172{
4173 int rc, exit_reason;
Janosch Frankc8aac232019-05-08 15:52:00 +02004174 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004175
Thomas Huth800c1062013-09-12 10:33:45 +02004176 /*
4177	 * We try to hold kvm->srcu during most of vcpu_run (except when
4178	 * running the guest), so that memslots (and other stuff) are protected
4179 */
4180 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4181
Thomas Hutha76ccff2013-09-12 10:33:44 +02004182 do {
4183 rc = vcpu_pre_run(vcpu);
4184 if (rc)
4185 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004186
Thomas Huth800c1062013-09-12 10:33:45 +02004187 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02004188 /*
4189		 * As PF_VCPU will be used in the fault handler, there must be
4190		 * no uaccess between guest_enter and guest_exit.
4191 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02004192 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02004193 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01004194 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02004195 local_irq_enable();
Janosch Frankc8aac232019-05-08 15:52:00 +02004196 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4197 memcpy(sie_page->pv_grregs,
4198 vcpu->run->s.regs.gprs,
4199 sizeof(sie_page->pv_grregs));
4200 }
Sven Schnelle56e62a72020-11-21 11:14:56 +01004201 if (test_cpu_flag(CIF_FPU))
4202 load_fpu_regs();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004203 exit_reason = sie64a(vcpu->arch.sie_block,
4204 vcpu->run->s.regs.gprs);
Janosch Frankc8aac232019-05-08 15:52:00 +02004205 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4206 memcpy(vcpu->run->s.regs.gprs,
4207 sie_page->pv_grregs,
4208 sizeof(sie_page->pv_grregs));
Janosch Frank3adae0b2019-12-13 08:26:06 -05004209 /*
4210 * We're not allowed to inject interrupts on intercepts
4211 * that leave the guest state in an "in-between" state
4212 * where the next SIE entry will do a continuation.
4213 * Fence interrupts in our "internal" PSW.
4214 */
4215 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4216 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4217 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4218 }
Janosch Frankc8aac232019-05-08 15:52:00 +02004219 }
Christian Borntraeger0097d122015-04-30 13:43:30 +02004220 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01004221 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02004222 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02004223 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02004224 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004225
Thomas Hutha76ccff2013-09-12 10:33:44 +02004226 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01004227 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004228
Thomas Huth800c1062013-09-12 10:33:45 +02004229 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01004230 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004231}
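
/*
 * Note on the protected-virt handling in __vcpu_run(): for a secure
 * guest, SIE works on the register copy in the sie_page (pv_grregs)
 * rather than on kvm_run, which is why the gprs are bounced in before
 * and back out after every SIE round trip.
 */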
4232
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004233static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
David Hildenbrandb028ee32014-07-17 10:47:43 +02004234{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004235 struct kvm_run *kvm_run = vcpu->run;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004236 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004237 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004238
4239 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004240 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02004241 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4242 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
David Hildenbrandb028ee32014-07-17 10:47:43 +02004243 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrandb028ee32014-07-17 10:47:43 +02004244 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4245 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4246 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4247 }
4248 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4249 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4250 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4251 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02004252 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4253 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004254 }
Collin Walling23a60f82020-06-22 11:46:36 -04004255 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
4256 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4257 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
Collin Walling3fd84172021-10-26 22:54:51 -04004258 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
Collin Walling23a60f82020-06-22 11:46:36 -04004259 }
Fan Zhang80cd8762016-08-15 04:53:22 +02004260 /*
4261 * If userspace sets the riccb (e.g. after migration) to a valid state,
4262 * we should enable RI here instead of doing the lazy enablement.
4263 */
4264 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004265 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02004266 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01004267 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004268 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01004269 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02004270 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004271 /*
4272 * If userspace sets the gscb (e.g. after migration) to non-zero,
4273 * we should enable GS here instead of doing the lazy enablement.
4274 */
4275 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
4276 test_kvm_facility(vcpu->kvm, 133) &&
4277 gscb->gssm &&
4278 !vcpu->arch.gs_enabled) {
4279 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
4280 vcpu->arch.sie_block->ecb |= ECB_GS;
4281 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4282 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02004283 }
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01004284 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
4285 test_kvm_facility(vcpu->kvm, 82)) {
4286 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4287 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4288 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004289 if (MACHINE_HAS_GS) {
4290 preempt_disable();
4291 __ctl_set_bit(2, 4);
4292 if (current->thread.gs_cb) {
4293 vcpu->arch.host_gscb = current->thread.gs_cb;
4294 save_gs_cb(vcpu->arch.host_gscb);
4295 }
4296 if (vcpu->arch.gs_enabled) {
4297 current->thread.gs_cb = (struct gs_cb *)
4298 &vcpu->run->s.regs.gscb;
4299 restore_gs_cb(current->thread.gs_cb);
4300 }
4301 preempt_enable();
4302 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004303 /* SIE will load etoken directly from SDNX and therefore kvm_run */
Janosch Frank811ea792019-06-14 13:11:21 +02004304}
4305
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004306static void sync_regs(struct kvm_vcpu *vcpu)
Janosch Frank811ea792019-06-14 13:11:21 +02004307{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004308 struct kvm_run *kvm_run = vcpu->run;
4309
Janosch Frank811ea792019-06-14 13:11:21 +02004310 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4311 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4312 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4313 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4314 /* some control register changes require a tlb flush */
4315 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4316 }
4317 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4318 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4319 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4320 }
4321 save_access_regs(vcpu->arch.host_acrs);
4322 restore_access_regs(vcpu->run->s.regs.acrs);
4323 /* save host (userspace) fprs/vrs */
4324 save_fpu_regs();
4325 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4326 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4327 if (MACHINE_HAS_VX)
4328 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
4329 else
4330 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
4331 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
4332 if (test_fp_ctl(current->thread.fpu.fpc))
4333 /* User space provided an invalid FPC, let's clear it */
4334 current->thread.fpu.fpc = 0;
4335
4336 /* Sync fmt2 only data */
4337 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004338 sync_regs_fmt2(vcpu);
Janosch Frank811ea792019-06-14 13:11:21 +02004339 } else {
4340 /*
4341 * In several places we have to modify our internal view to
4342 * not do things that are disallowed by the ultravisor. For
4343 * example we must not inject interrupts after specific exits
4344 * (e.g. 112 prefix page not secure). We do this by turning
4345 * off the machine check, external and I/O interrupt bits
4346 * of our PSW copy. To avoid getting validity intercepts, we
4347		 * only accept the condition code from userspace.
4348 */
4349 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4350 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4351 PSW_MASK_CC;
4352 }
Fan Zhang80cd8762016-08-15 04:53:22 +02004353
David Hildenbrandb028ee32014-07-17 10:47:43 +02004354 kvm_run->kvm_dirty_regs = 0;
4355}
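
/*
 * The sync_regs()/store_regs() pair implements the lazy register
 * switch: while the vcpu runs, the guest acrs and fprs/vrs live in the
 * real host registers and the host values are parked in vcpu->arch;
 * store_regs() below undoes the switch on the way back to userspace.
 */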
4356
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004357static void store_regs_fmt2(struct kvm_vcpu *vcpu)
David Hildenbrandb028ee32014-07-17 10:47:43 +02004358{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004359 struct kvm_run *kvm_run = vcpu->run;
4360
David Hildenbrandb028ee32014-07-17 10:47:43 +02004361 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4362 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4363 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01004364 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Collin Walling23a60f82020-06-22 11:46:36 -04004365 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004366 if (MACHINE_HAS_GS) {
Heiko Carstens44bada22021-04-15 10:01:27 +02004367 preempt_disable();
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004368 __ctl_set_bit(2, 4);
4369 if (vcpu->arch.gs_enabled)
4370 save_gs_cb(current->thread.gs_cb);
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004371 current->thread.gs_cb = vcpu->arch.host_gscb;
4372 restore_gs_cb(vcpu->arch.host_gscb);
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004373 if (!vcpu->arch.host_gscb)
4374 __ctl_clear_bit(2, 4);
4375 vcpu->arch.host_gscb = NULL;
Heiko Carstens44bada22021-04-15 10:01:27 +02004376 preempt_enable();
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004377 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004378 /* SIE will save etoken directly into SDNX and therefore kvm_run */
David Hildenbrandb028ee32014-07-17 10:47:43 +02004379}
4380
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004381static void store_regs(struct kvm_vcpu *vcpu)
Janosch Frank811ea792019-06-14 13:11:21 +02004382{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004383 struct kvm_run *kvm_run = vcpu->run;
4384
Janosch Frank811ea792019-06-14 13:11:21 +02004385 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4386 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4387 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
4388 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4389 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
4390 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4391 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4392 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4393 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
4394 save_access_regs(vcpu->run->s.regs.acrs);
4395 restore_access_regs(vcpu->arch.host_acrs);
4396 /* Save guest register state */
4397 save_fpu_regs();
4398 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4399 /* Restore will be done lazily at return */
4400 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
4401 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
4402 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004403 store_regs_fmt2(vcpu);
Janosch Frank811ea792019-06-14 13:11:21 +02004404}
4405
Tianjia Zhang1b94f6f2020-04-16 13:10:57 +08004406int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004407{
Tianjia Zhang1b94f6f2020-04-16 13:10:57 +08004408 struct kvm_run *kvm_run = vcpu->run;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004409 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004410
Paolo Bonzini460df4c2017-02-08 11:50:15 +01004411 if (kvm_run->immediate_exit)
4412 return -EINTR;
4413
Thomas Huth200824f2019-09-04 10:51:59 +02004414 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4415 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4416 return -EINVAL;
4417
Christoffer Dallaccb7572017-12-04 21:35:25 +01004418 vcpu_load(vcpu);
4419
David Hildenbrand27291e22014-01-23 12:26:52 +01004420 if (guestdbg_exit_pending(vcpu)) {
4421 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004422 rc = 0;
4423 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01004424 }
4425
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004426 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004427
Janosch Frankfe28c7862019-05-15 13:24:30 +02004428 /*
4429	 * No need to check the return value of kvm_s390_vcpu_start() as it can
4430	 * only fail for protvirt, and protvirt implies user cpu state control.
4431 */
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004432 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4433 kvm_s390_vcpu_start(vcpu);
4434 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004435 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004436 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004437 rc = -EINVAL;
4438 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004439 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004440
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004441 sync_regs(vcpu);
David Hildenbranddb0758b2016-02-15 09:42:25 +01004442 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004443
Heiko Carstensdab4079d2009-06-12 10:26:32 +02004444 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004445 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02004446
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004447 if (signal_pending(current) && !rc) {
4448 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004449 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004450 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004451
David Hildenbrand27291e22014-01-23 12:26:52 +01004452 if (guestdbg_exit_pending(vcpu) && !rc) {
4453 kvm_s390_prepare_debug_exit(vcpu);
4454 rc = 0;
4455 }
4456
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004457 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02004458 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004459 rc = 0;
4460 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004461
David Hildenbranddb0758b2016-02-15 09:42:25 +01004462 disable_cpu_timer_accounting(vcpu);
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004463 store_regs(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004464
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004465 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004466
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004467 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01004468out:
4469 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02004470 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004471}
4472
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004473/*
4474 * store status at address
4475 * we have two special cases:
4476 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4477 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4478 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01004479int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004480{
Carsten Otte092670c2011-07-24 10:48:22 +02004481 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004482 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02004483 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01004484 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004485 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004486
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004487 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01004488 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4489 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004490 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004491 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004492 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4493 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004494 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004495 gpa = px;
4496 } else
4497 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004498
4499 /* manually convert vector registers if necessary */
4500 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01004501 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004502 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4503 fprs, 128);
4504 } else {
4505 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01004506 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004507 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004508 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004509 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004510 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004511 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004512 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02004513 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004514 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004515 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004516 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004517 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01004518 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004519 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01004520 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01004521 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004522 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004523 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004524 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004525 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004526 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004527 &vcpu->arch.sie_block->gcr, 128);
4528 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004529}
4530
Thomas Huthe8798922013-11-06 15:46:33 +01004531int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4532{
4533 /*
4534 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01004535 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01004536	 * them into the save area.
4537 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02004538 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004539 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01004540 save_access_regs(vcpu->run->s.regs.acrs);
4541
4542 return kvm_s390_store_status_unloaded(vcpu, addr);
4543}
4544
David Hildenbrand8ad35752014-03-14 11:00:21 +01004545static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4546{
4547 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004548 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004549}
4550
4551static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
4552{
4553 unsigned int i;
4554 struct kvm_vcpu *vcpu;
4555
4556 kvm_for_each_vcpu(i, vcpu, kvm) {
4557 __disable_ibs_on_vcpu(vcpu);
4558 }
4559}
4560
4561static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4562{
David Hildenbrand09a400e2016-04-04 15:57:08 +02004563 if (!sclp.has_ibs)
4564 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004565 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004566 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004567}
4568
Janosch Frankfe28c7862019-05-15 13:24:30 +02004569int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004570{
Janosch Frankfe28c7862019-05-15 13:24:30 +02004571 int i, online_vcpus, r = 0, started_vcpus = 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004572
4573 if (!is_vcpu_stopped(vcpu))
Janosch Frankfe28c7862019-05-15 13:24:30 +02004574 return 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004575
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004576 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004577 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004578 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004579 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4580
Janosch Frankfe28c7862019-05-15 13:24:30 +02004581 /* Let's tell the UV that we want to change into the operating state */
4582 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4583 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
4584 if (r) {
4585 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4586 return r;
4587 }
4588 }
4589
David Hildenbrand8ad35752014-03-14 11:00:21 +01004590 for (i = 0; i < online_vcpus; i++) {
4591 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
4592 started_vcpus++;
4593 }
4594
4595 if (started_vcpus == 0) {
4596 /* we're the only active VCPU -> speed it up */
4597 __enable_ibs_on_vcpu(vcpu);
4598 } else if (started_vcpus == 1) {
4599 /*
4600 * As we are starting a second VCPU, we have to disable
4601 * the IBS facility on all VCPUs to remove potentially
Bhaskar Chowdhury38860752021-02-13 21:02:27 +05304602 * outstanding ENABLE requests.
David Hildenbrand8ad35752014-03-14 11:00:21 +01004603 */
4604 __disable_ibs_on_all_vcpus(vcpu->kvm);
4605 }
4606
David Hildenbrand9daecfc2018-01-23 18:05:30 +01004607 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004608 /*
Christian Borntraeger72f21822020-01-30 11:18:28 -05004609 * The real PSW might have changed due to a RESTART interpreted by the
4610 * ultravisor. We block all interrupts and let the next sie exit
4611 * refresh our view.
4612 */
4613 if (kvm_s390_pv_cpu_is_protected(vcpu))
4614 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4615 /*
David Hildenbrand8ad35752014-03-14 11:00:21 +01004616 * Another VCPU might have used IBS while we were offline.
4617 * Let's play safe and flush the VCPU at startup.
4618 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02004619 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004620 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
Janosch Frankfe28c7862019-05-15 13:24:30 +02004621 return 0;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004622}
4623
Janosch Frankfe28c7862019-05-15 13:24:30 +02004624int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004625{
Janosch Frankfe28c7862019-05-15 13:24:30 +02004626 int i, online_vcpus, r = 0, started_vcpus = 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004627 struct kvm_vcpu *started_vcpu = NULL;
4628
4629 if (is_vcpu_stopped(vcpu))
Janosch Frankfe28c7862019-05-15 13:24:30 +02004630 return 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004631
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004632 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004633 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004634 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004635 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4636
Janosch Frankfe28c7862019-05-15 13:24:30 +02004637 /* Let's tell the UV that we want to change into the stopped state */
4638 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4639 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
4640 if (r) {
4641 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4642 return r;
4643 }
4644 }
4645
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004646	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02004647 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004648
David Hildenbrandef8f4f42018-01-23 18:05:29 +01004649 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004650 __disable_ibs_on_vcpu(vcpu);
4651
4652 for (i = 0; i < online_vcpus; i++) {
4653 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
4654 started_vcpus++;
4655 started_vcpu = vcpu->kvm->vcpus[i];
4656 }
4657 }
4658
4659 if (started_vcpus == 1) {
4660 /*
4661 * As we only have one VCPU left, we want to enable the
4662 * IBS facility for that VCPU to speed it up.
4663 */
4664 __enable_ibs_on_vcpu(started_vcpu);
4665 }
4666
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004667 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
Janosch Frankfe28c7862019-05-15 13:24:30 +02004668 return 0;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004669}
4670
Cornelia Huckd6712df2012-12-20 15:32:11 +01004671static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4672 struct kvm_enable_cap *cap)
4673{
4674 int r;
4675
4676 if (cap->flags)
4677 return -EINVAL;
4678
4679 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004680 case KVM_CAP_S390_CSS_SUPPORT:
4681 if (!vcpu->kvm->arch.css_support) {
4682 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02004683 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004684 trace_kvm_s390_enable_css(vcpu->kvm);
4685 }
4686 r = 0;
4687 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01004688 default:
4689 r = -EINVAL;
4690 break;
4691 }
4692 return r;
4693}
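
/*
 * Illustration only: userspace enables the capability handled above
 * with the generic per-vcpu KVM_ENABLE_CAP ioctl:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_CSS_SUPPORT,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */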
4694
Janosch Frank19e12272019-04-02 09:21:06 +02004695static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
4696 struct kvm_s390_mem_op *mop)
4697{
4698 void __user *uaddr = (void __user *)mop->buf;
4699 int r = 0;
4700
4701 if (mop->flags || !mop->size)
4702 return -EINVAL;
4703 if (mop->size + mop->sida_offset < mop->size)
4704 return -EINVAL;
4705 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
4706 return -E2BIG;
4707
4708 switch (mop->op) {
4709 case KVM_S390_MEMOP_SIDA_READ:
4710 if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
4711 mop->sida_offset), mop->size))
4712 r = -EFAULT;
4713
4714 break;
4715 case KVM_S390_MEMOP_SIDA_WRITE:
4716 if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
4717 mop->sida_offset), uaddr, mop->size))
4718 r = -EFAULT;
4719 break;
4720 }
4721 return r;
4722}

Thomas Huth41408c282015-02-06 15:01:21 +01004723static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
4724 struct kvm_s390_mem_op *mop)
4725{
4726 void __user *uaddr = (void __user *)mop->buf;
4727 void *tmpbuf = NULL;
Janosch Frank19e12272019-04-02 09:21:06 +02004728 int r = 0;
Thomas Huth41408c282015-02-06 15:01:21 +01004729 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
4730 | KVM_S390_MEMOP_F_CHECK_ONLY;
4731
Thomas Hutha13b03b2019-08-29 14:25:17 +02004732 if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
Thomas Huth41408c282015-02-06 15:01:21 +01004733 return -EINVAL;
4734
4735 if (mop->size > MEM_OP_MAX_SIZE)
4736 return -E2BIG;
4737
Janosch Frank19e12272019-04-02 09:21:06 +02004738 if (kvm_s390_pv_cpu_is_protected(vcpu))
4739 return -EINVAL;
4740
Thomas Huth41408c282015-02-06 15:01:21 +01004741 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
4742 tmpbuf = vmalloc(mop->size);
4743 if (!tmpbuf)
4744 return -ENOMEM;
4745 }
4746
Thomas Huth41408c282015-02-06 15:01:21 +01004747 switch (mop->op) {
4748 case KVM_S390_MEMOP_LOGICAL_READ:
4749 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004750 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4751 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01004752 break;
4753 }
4754 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4755 if (r == 0) {
4756 if (copy_to_user(uaddr, tmpbuf, mop->size))
4757 r = -EFAULT;
4758 }
4759 break;
4760 case KVM_S390_MEMOP_LOGICAL_WRITE:
4761 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004762 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4763 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01004764 break;
4765 }
4766 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
4767 r = -EFAULT;
4768 break;
4769 }
4770 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4771 break;
Thomas Huth41408c282015-02-06 15:01:21 +01004772 }
4773
Thomas Huth41408c282015-02-06 15:01:21 +01004774 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
4775 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
4776
4777 vfree(tmpbuf);
4778 return r;
4779}
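
/*
 * Illustration only: a userspace read of 256 bytes of guest logical
 * memory through the handler above, assuming vcpu_fd and a buffer buf:
 *
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = 0x2000,		// guest logical address
 *		.buf   = (__u64)(uintptr_t)buf,
 *		.size  = 256,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.ar    = 0,			// access register number
 *	};
 *
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
 *
 * With KVM_S390_MEMOP_F_CHECK_ONLY in .flags only the access check is
 * performed and nothing is copied.
 */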
4780
static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu,
				      struct kvm_s390_mem_op *mop)
{
	int r, srcu_idx;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		r = kvm_s390_guest_mem_op(vcpu, mop);
		break;
	case KVM_S390_MEMOP_SIDA_READ:
	case KVM_S390_MEMOP_SIDA_WRITE:
		/* we are locked against sida going away by the vcpu->mutex */
		r = kvm_s390_guest_sida_op(vcpu, mop);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	return r;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		/* legacy variant of KVM_S390_IRQ, converted before injection */
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

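/*
 * Usage sketch (illustration only): injecting an interrupt with the newer
 * KVM_S390_IRQ ioctl; KVM_S390_INTERRUPT takes the legacy layout instead.
 * "vcpu_fd" is an assumed vcpu file descriptor.
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_EMERGENCY,
 *		.u.emerg.code = 0,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_IRQ, &irq))
 *		perror("KVM_S390_IRQ");
 */
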
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;
	u16 rc, rrc;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_INITIAL,
					  &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_memsida_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

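/*
 * Usage sketch (illustration only): setting the initial PSW before the
 * first KVM_RUN. The mask and address below are made-up example values
 * (mask with the addressing-mode bits set, addr an assumed entry point).
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000ULL,
 *		.addr = 0x10000,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw))
 *		perror("KVM_S390_SET_INITIAL_PSW");
 */
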
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	/* user controlled VMs may mmap() the sie control block of a vcpu */
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

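/*
 * Usage sketch (illustration only): a user-controlled VM could map the sie
 * control block of a vcpu into its own address space; the page offset
 * selects the page exported above. "page_size" is assumed to be the host
 * page size, and PROT_READ is an assumption of this sketch.
 *
 *	void *sie = mmap(NULL, page_size, PROT_READ, MAP_SHARED, vcpu_fd,
 *			 KVM_S390_SIE_PAGE_OFFSET * page_size);
 *	if (sie == MAP_FAILED)
 *		perror("mmap");
 */
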
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks: memory slots have to start and end on a
	 * segment boundary (1 MB). The backing memory in userland may be
	 * fragmented into several vmas, and it is fine to mmap() and
	 * munmap() ranges within this slot at any time after this call.
	 */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	/* When we are protected, we should not change the memory slots */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;
	return 0;
}

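/*
 * Usage sketch (illustration only): a memslot that satisfies the checks
 * above; both the user address and the size are segment (1 MB) aligned.
 * "vm_fd" and the 1 MB-aligned "backing" allocation are assumptions of
 * the caller.
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = 256 << 20,
 *		.userspace_addr = (__u64)(unsigned long)backing,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem))
 *		perror("KVM_SET_USER_MEMORY_REGION");
 */
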
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		/* a move unmaps the old range, then maps the new one like a create */
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		fallthrough;
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
				      mem->guest_phys_addr, mem->memory_size);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

/*
 * For facility word i, the matching 2-bit field of sclp.hmfai selects how
 * many leading 16-bit blocks of that word are reserved to the hypervisor;
 * the returned mask keeps only the facility bits that need no hypervisor
 * support.
 */
static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

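/*
 * Worked example for nonhyp_mask(), derived from the code above: if the
 * 2-bit field of sclp.hmfai for facility word i is 1, the 48-bit mask is
 * shifted right by 16 bits, i.e.
 *
 *	0x0000ffffffffffffUL >> (1 << 4) == 0x00000000ffffffffUL
 *
 * so only the low 32 facility bits of that word remain eligible.
 */
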
void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	/* only offer facilities that do not need hypervisor support */
	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");