// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include <asm/fpu/api.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS(),
	STATS_DESC_COUNTER(VM, inject_io),
	STATS_DESC_COUNTER(VM, inject_float_mchk),
	STATS_DESC_COUNTER(VM, inject_pfault_done),
	STATS_DESC_COUNTER(VM, inject_service_signal),
	STATS_DESC_COUNTER(VM, inject_virtio)
};

const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
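/*
 * Layout note (a sketch, not part of this file): userspace reads these
 * stats through the binary stats interface. KVM_GET_STATS_FD returns a
 * file descriptor whose contents follow the header above: an id string
 * of .name_size bytes, the descriptor array at .desc_offset, and the
 * u64 counter values at .data_offset. Hypothetical reader:
 *
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/kvm.h>
 *
 *	struct kvm_stats_header hdr;
 *	int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);
 *	pread(stats_fd, &hdr, sizeof(hdr), 0);
 *	// each of hdr.num_desc descriptors occupies
 *	// sizeof(struct kvm_stats_desc) + hdr.name_size bytes
 */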

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, exit_userspace),
	STATS_DESC_COUNTER(VCPU, exit_null),
	STATS_DESC_COUNTER(VCPU, exit_external_request),
	STATS_DESC_COUNTER(VCPU, exit_io_request),
	STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
	STATS_DESC_COUNTER(VCPU, exit_stop_request),
	STATS_DESC_COUNTER(VCPU, exit_validity),
	STATS_DESC_COUNTER(VCPU, exit_instruction),
	STATS_DESC_COUNTER(VCPU, exit_pei),
	STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
	STATS_DESC_COUNTER(VCPU, instruction_lctl),
	STATS_DESC_COUNTER(VCPU, instruction_lctlg),
	STATS_DESC_COUNTER(VCPU, instruction_stctl),
	STATS_DESC_COUNTER(VCPU, instruction_stctg),
	STATS_DESC_COUNTER(VCPU, exit_program_interruption),
	STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
	STATS_DESC_COUNTER(VCPU, exit_operation_exception),
	STATS_DESC_COUNTER(VCPU, deliver_ckc),
	STATS_DESC_COUNTER(VCPU, deliver_cputm),
	STATS_DESC_COUNTER(VCPU, deliver_external_call),
	STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
	STATS_DESC_COUNTER(VCPU, deliver_service_signal),
	STATS_DESC_COUNTER(VCPU, deliver_virtio),
	STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
	STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
	STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
	STATS_DESC_COUNTER(VCPU, deliver_program),
	STATS_DESC_COUNTER(VCPU, deliver_io),
	STATS_DESC_COUNTER(VCPU, deliver_machine_check),
	STATS_DESC_COUNTER(VCPU, exit_wait_state),
	STATS_DESC_COUNTER(VCPU, inject_ckc),
	STATS_DESC_COUNTER(VCPU, inject_cputm),
	STATS_DESC_COUNTER(VCPU, inject_external_call),
	STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
	STATS_DESC_COUNTER(VCPU, inject_mchk),
	STATS_DESC_COUNTER(VCPU, inject_pfault_init),
	STATS_DESC_COUNTER(VCPU, inject_program),
	STATS_DESC_COUNTER(VCPU, inject_restart),
	STATS_DESC_COUNTER(VCPU, inject_set_prefix),
	STATS_DESC_COUNTER(VCPU, inject_stop_signal),
	STATS_DESC_COUNTER(VCPU, instruction_epsw),
	STATS_DESC_COUNTER(VCPU, instruction_gs),
	STATS_DESC_COUNTER(VCPU, instruction_io_other),
	STATS_DESC_COUNTER(VCPU, instruction_lpsw),
	STATS_DESC_COUNTER(VCPU, instruction_lpswe),
	STATS_DESC_COUNTER(VCPU, instruction_pfmf),
	STATS_DESC_COUNTER(VCPU, instruction_ptff),
	STATS_DESC_COUNTER(VCPU, instruction_sck),
	STATS_DESC_COUNTER(VCPU, instruction_sckpf),
	STATS_DESC_COUNTER(VCPU, instruction_stidp),
	STATS_DESC_COUNTER(VCPU, instruction_spx),
	STATS_DESC_COUNTER(VCPU, instruction_stpx),
	STATS_DESC_COUNTER(VCPU, instruction_stap),
	STATS_DESC_COUNTER(VCPU, instruction_iske),
	STATS_DESC_COUNTER(VCPU, instruction_ri),
	STATS_DESC_COUNTER(VCPU, instruction_rrbe),
	STATS_DESC_COUNTER(VCPU, instruction_sske),
	STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
	STATS_DESC_COUNTER(VCPU, instruction_stsi),
	STATS_DESC_COUNTER(VCPU, instruction_stfl),
	STATS_DESC_COUNTER(VCPU, instruction_tb),
	STATS_DESC_COUNTER(VCPU, instruction_tpi),
	STATS_DESC_COUNTER(VCPU, instruction_tprot),
	STATS_DESC_COUNTER(VCPU, instruction_tsch),
	STATS_DESC_COUNTER(VCPU, instruction_sie),
	STATS_DESC_COUNTER(VCPU, instruction_essa),
	STATS_DESC_COUNTER(VCPU, instruction_sthyi),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
	STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_10),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_44),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_9c),
	STATS_DESC_COUNTER(VCPU, diag_9c_ignored),
	STATS_DESC_COUNTER(VCPU, diag_9c_forward),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_258),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_308),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_500),
	STATS_DESC_COUNTER(VCPU, instruction_diagnose_other),
	STATS_DESC_COUNTER(VCPU, pfault_sync)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/* maximum diag9c forwarding per second */
unsigned int diag9c_forwarding_hz;
module_param(diag9c_forwarding_hz, uint, 0644);
MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");
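/*
 * Usage note (a sketch, not part of this file): the module parameters
 * above are given at load time or, where the 0644 mode allows it,
 * changed at runtime through sysfs:
 *
 *	modprobe kvm nested=1
 *	echo 50 > /sys/module/kvm/parameters/halt_poll_max_steal
 */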

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
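/*
 * Worked example for the carry handling above (illustrative only): with
 * the multiple-epoch facility, (epdx, epoch) forms one wide signed
 * value. Take delta = -1 after the negation (0xffffffffffffffff as u64),
 * so delta_idx = -1 (0xff as u8). Starting from epoch = 0 the addition
 * wraps epoch to 0xffffffffffffffff without satisfying
 * "scb->epoch < delta", so no carry is added and epdx changes by -1,
 * the borrow that multi-word arithmetic requires. For a positive delta,
 * delta_idx stays 0 and the unsigned compare detects exactly the
 * overflows that must carry into epdx.
 */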

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	unsigned long i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void *opaque)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}
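/*
 * Note on plo_test_bit() (a hedged aside, not authoritative): setting
 * the 0x100 bit in r0 selects the PLO "test" mode, which only reports
 * via cc 0 whether function code nr is installed instead of performing
 * it. kvm_s390_cpu_feat_init() below records every installed code in a
 * 256-bit query mask; checking a code later mirrors the store:
 *
 *	// hypothetical: was PLO function code 3 reported as installed?
 *	int fc3 = kvm_s390_available_subfunc.plo[3 >> 3] & (0x80 >> (3 & 7));
 */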

static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto out;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		r = KVM_GUESTDBG_VALID_MASK;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		if (ext == KVM_CAP_NR_VCPUS)
			r = min_t(unsigned int, num_online_cpus(), r);
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	default:
		r = 0;
	}
	return r;
}
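/*
 * Userspace view of the switch above (a hypothetical sketch, not part
 * of this file): KVM_CHECK_EXTENSION can be issued against the VM fd so
 * that per-VM state such as ucontrol is taken into account.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	// returns 65536 (MEM_OP_MAX_SIZE) here, or 0 if unsupported
 *	int max_mem_op = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
 */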

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
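/*
 * Userspace sketch for the ioctl above (hypothetical code, not part of
 * this file): the VMM passes a bitmap with one bit per page of the slot
 * and retrieves-and-clears the dirty state in a single call.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	struct kvm_dirty_log log = {
 *		.slot = 0,			// memslot id to query
 *		.dirty_bitmap = bitmap_buf,	// slot_npages / 8 bytes
 *	};
 *	int ret = ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */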

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			if (test_facility(192)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 192);
				set_kvm_facility(kvm->arch.model.fac_list, 192);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
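/*
 * Userspace sketch for enabling one of the VM capabilities above
 * (hypothetical code, not part of this file). Capabilities that alter
 * the CPU model, e.g. KVM_CAP_S390_VECTOR_REGISTERS, must be enabled
 * before the first vCPU exists or they fail with -EBUSY.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *	int ret = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */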

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}
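/*
 * Userspace sketch for the crypto attributes above (hypothetical code,
 * not part of this file): they are toggled through the VM device
 * attribute interface under group KVM_S390_VM_CRYPTO.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CRYPTO,
 *		.attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW,
 *	};
 *	int ret = ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */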

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	unsigned long cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}
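/*
 * Userspace sketch for querying migration mode (hypothetical code, not
 * part of this file): the state comes back as a u64 written through the
 * attr->addr pointer.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	__u64 mig_mode;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr = KVM_S390_VM_MIGRATION_STATUS,
 *		.addr = (__u64)(unsigned long)&mig_mode,
 *	};
 *	int ret = ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 */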

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
1190
David Hildenbrand33d1b272018-04-27 14:36:13 +02001191static void kvm_s390_get_tod_clock(struct kvm *kvm,
1192 struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001193{
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001194 union tod_clock clk;
Collin L. Walling8fa16962016-07-26 15:29:44 -04001195
1196 preempt_disable();
1197
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001198 store_tod_clock_ext(&clk);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001199
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001200 gtod->tod = clk.tod + kvm->arch.epoch;
David Hildenbrand33d1b272018-04-27 14:36:13 +02001201 gtod->epoch_idx = 0;
1202 if (test_kvm_facility(kvm, 139)) {
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01001203 gtod->epoch_idx = clk.ei + kvm->arch.epdx;
1204 if (gtod->tod < clk.tod)
David Hildenbrand33d1b272018-04-27 14:36:13 +02001205 gtod->epoch_idx += 1;
1206 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04001207
1208 preempt_enable();
1209}
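
/*
 * Worked example (illustration only) of the carry handling above: with
 * facility 139 the guest TOD is a 72-bit quantity, an 8-bit epoch index
 * on top of the 64-bit TOD. If clk.tod = 0xffffffffffffff00 and
 * kvm->arch.epoch = 0x200, the 64-bit sum wraps to 0x100, so
 * gtod->tod (0x100) < clk.tod and the epoch index is incremented by one
 * to absorb the carry, exactly as a 72-bit addition would.
 */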
1210
1211static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1212{
1213 struct kvm_s390_vm_tod_clock gtod;
1214
1215 memset(&gtod, 0, sizeof(gtod));
David Hildenbrand33d1b272018-04-27 14:36:13 +02001216 kvm_s390_get_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001217 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1218 return -EFAULT;
1219
1220 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1221 gtod.epoch_idx, gtod.tod);
1222 return 0;
1223}
1224
Jason J. Herne72f25022014-11-25 09:46:02 -05001225static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1226{
1227 u8 gtod_high = 0;
1228
1229 if (copy_to_user((void __user *)attr->addr, &gtod_high,
1230 sizeof(gtod_high)))
1231 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001232 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001233
1234 return 0;
1235}
1236
1237static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1238{
David Hildenbrand5a3d8832015-09-29 16:27:24 +02001239 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -05001240
David Hildenbrand60417fc2015-09-29 16:20:36 +02001241 gtod = kvm_s390_get_tod_clock_fast(kvm);
Jason J. Herne72f25022014-11-25 09:46:02 -05001242 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1243 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001244 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001245
1246 return 0;
1247}
1248
1249static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1250{
1251 int ret;
1252
1253 if (attr->flags)
1254 return -EINVAL;
1255
1256 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001257 case KVM_S390_VM_TOD_EXT:
1258 ret = kvm_s390_get_tod_ext(kvm, attr);
1259 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001260 case KVM_S390_VM_TOD_HIGH:
1261 ret = kvm_s390_get_tod_high(kvm, attr);
1262 break;
1263 case KVM_S390_VM_TOD_LOW:
1264 ret = kvm_s390_get_tod_low(kvm, attr);
1265 break;
1266 default:
1267 ret = -ENXIO;
1268 break;
1269 }
1270 return ret;
1271}
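
/*
 * Illustrative sketch (not part of this file): setting the guest TOD via
 * the attribute interface handled by kvm_s390_set_tod(). Assumes a VM fd;
 * a non-zero epoch index additionally requires facility 139, otherwise
 * the kernel returns -EINVAL as seen in kvm_s390_set_tod_ext().
 */
#if 0	/* userspace example, kept out of the build on purpose */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_guest_tod(int vm_fd, uint8_t epoch_idx, uint64_t tod)
{
	struct kvm_s390_vm_tod_clock gtod = {
		.epoch_idx = epoch_idx,
		.tod = tod,
	};
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_TOD,
		.attr  = KVM_S390_VM_TOD_EXT,
		.addr  = (uint64_t)(uintptr_t)&gtod,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
#endif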
1272
Michael Mueller658b6ed2015-02-02 15:49:35 +01001273static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1274{
1275 struct kvm_s390_vm_cpu_processor *proc;
David Hildenbrand053dd232016-04-04 13:59:42 +02001276 u16 lowest_ibc, unblocked_ibc;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001277 int ret = 0;
1278
1279 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +02001280 if (kvm->created_vcpus) {
Michael Mueller658b6ed2015-02-02 15:49:35 +01001281 ret = -EBUSY;
1282 goto out;
1283 }
Christian Borntraegerc4196212020-11-06 08:34:23 +01001284 proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001285 if (!proc) {
1286 ret = -ENOMEM;
1287 goto out;
1288 }
1289 if (!copy_from_user(proc, (void __user *)attr->addr,
1290 sizeof(*proc))) {
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001291 kvm->arch.model.cpuid = proc->cpuid;
David Hildenbrand053dd232016-04-04 13:59:42 +02001292 lowest_ibc = sclp.ibc >> 16 & 0xfff;
1293 unblocked_ibc = sclp.ibc & 0xfff;
David Hildenbrand0487c442016-06-10 09:22:31 +02001294 if (lowest_ibc && proc->ibc) {
David Hildenbrand053dd232016-04-04 13:59:42 +02001295 if (proc->ibc > unblocked_ibc)
1296 kvm->arch.model.ibc = unblocked_ibc;
1297 else if (proc->ibc < lowest_ibc)
1298 kvm->arch.model.ibc = lowest_ibc;
1299 else
1300 kvm->arch.model.ibc = proc->ibc;
1301 }
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001302 memcpy(kvm->arch.model.fac_list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +01001303 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001304 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1305 kvm->arch.model.ibc,
1306 kvm->arch.model.cpuid);
1307 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1308 kvm->arch.model.fac_list[0],
1309 kvm->arch.model.fac_list[1],
1310 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001311 } else
1312 ret = -EFAULT;
1313 kfree(proc);
1314out:
1315 mutex_unlock(&kvm->lock);
1316 return ret;
1317}
1318
David Hildenbrand15c97052015-03-19 17:36:43 +01001319static int kvm_s390_set_processor_feat(struct kvm *kvm,
1320 struct kvm_device_attr *attr)
1321{
1322 struct kvm_s390_vm_cpu_feat data;
David Hildenbrand15c97052015-03-19 17:36:43 +01001323
1324 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1325 return -EFAULT;
1326 if (!bitmap_subset((unsigned long *) data.feat,
1327 kvm_s390_available_cpu_feat,
1328 KVM_S390_VM_CPU_FEAT_NR_BITS))
1329 return -EINVAL;
1330
1331 mutex_lock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001332 if (kvm->created_vcpus) {
1333 mutex_unlock(&kvm->lock);
1334 return -EBUSY;
David Hildenbrand15c97052015-03-19 17:36:43 +01001335 }
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001336 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1337 KVM_S390_VM_CPU_FEAT_NR_BITS);
David Hildenbrand15c97052015-03-19 17:36:43 +01001338 mutex_unlock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001339 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1340 data.feat[0],
1341 data.feat[1],
1342 data.feat[2]);
1343 return 0;
David Hildenbrand15c97052015-03-19 17:36:43 +01001344}
1345
David Hildenbrand0a763c72016-05-18 16:03:47 +02001346static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1347 struct kvm_device_attr *attr)
1348{
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001349 mutex_lock(&kvm->lock);
1350 if (kvm->created_vcpus) {
1351 mutex_unlock(&kvm->lock);
1352 return -EBUSY;
1353 }
1354
1355 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1356 sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1357 mutex_unlock(&kvm->lock);
1358 return -EFAULT;
1359 }
1360 mutex_unlock(&kvm->lock);
1361
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001362 VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1363 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1364 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1365 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1366 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1367 VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1368 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1369 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1370 VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1371 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1372 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1373 VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1374 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1375 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1376 VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
1377 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1378 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1379 VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1380 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1381 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1382 VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1383 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1384 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1385 VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1386 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1387 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1388 VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1389 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1390 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1391 VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1392 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1393 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1394 VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1395 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1396 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1397 VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1398 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1399 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1400 VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1401 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1402 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1403 VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1404 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1405 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001406 VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1407 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1408 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001409 VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1410 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1411 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1412 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1413 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001414 VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1415 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1416 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1417 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1418 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001419
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001420 return 0;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001421}
1422
Michael Mueller658b6ed2015-02-02 15:49:35 +01001423static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1424{
1425 int ret = -ENXIO;
1426
1427 switch (attr->attr) {
1428 case KVM_S390_VM_CPU_PROCESSOR:
1429 ret = kvm_s390_set_processor(kvm, attr);
1430 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001431 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1432 ret = kvm_s390_set_processor_feat(kvm, attr);
1433 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001434 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1435 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1436 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001437 }
1438 return ret;
1439}
1440
1441static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1442{
1443 struct kvm_s390_vm_cpu_processor *proc;
1444 int ret = 0;
1445
Christian Borntraegerc4196212020-11-06 08:34:23 +01001446 proc = kzalloc(sizeof(*proc), GFP_KERNEL_ACCOUNT);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001447 if (!proc) {
1448 ret = -ENOMEM;
1449 goto out;
1450 }
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001451 proc->cpuid = kvm->arch.model.cpuid;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001452 proc->ibc = kvm->arch.model.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001453 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1454 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001455 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1456 kvm->arch.model.ibc,
1457 kvm->arch.model.cpuid);
1458 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1459 kvm->arch.model.fac_list[0],
1460 kvm->arch.model.fac_list[1],
1461 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001462 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1463 ret = -EFAULT;
1464 kfree(proc);
1465out:
1466 return ret;
1467}
1468
1469static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1470{
1471 struct kvm_s390_vm_cpu_machine *mach;
1472 int ret = 0;
1473
Christian Borntraegerc4196212020-11-06 08:34:23 +01001474 mach = kzalloc(sizeof(*mach), GFP_KERNEL_ACCOUNT);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001475 if (!mach) {
1476 ret = -ENOMEM;
1477 goto out;
1478 }
1479 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001480 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001481 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001482 S390_ARCH_FAC_LIST_SIZE_BYTE);
Sven Schnelle17e89e12021-05-05 22:01:10 +02001483 memcpy((unsigned long *)&mach->fac_list, stfle_fac_list,
1484 sizeof(stfle_fac_list));
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001485 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1486 kvm->arch.model.ibc,
1487 kvm->arch.model.cpuid);
1488 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1489 mach->fac_mask[0],
1490 mach->fac_mask[1],
1491 mach->fac_mask[2]);
1492 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1493 mach->fac_list[0],
1494 mach->fac_list[1],
1495 mach->fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001496 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1497 ret = -EFAULT;
1498 kfree(mach);
1499out:
1500 return ret;
1501}
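
/*
 * Illustrative sketch (not part of this file): reading the host CPU-model
 * data that kvm_s390_get_machine() fills in. Minimal example under the
 * stated assumptions: a VM fd and the UAPI struct kvm_s390_vm_cpu_machine
 * from <linux/kvm.h>.
 */
#if 0	/* userspace example, kept out of the build on purpose */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int show_machine_model(int vm_fd)
{
	struct kvm_s390_vm_cpu_machine mach;
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CPU_MODEL,
		.attr  = KVM_S390_VM_CPU_MACHINE,
		.addr  = (uint64_t)(uintptr_t)&mach,
	};

	if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr) < 0)
		return -1;
	printf("cpuid 0x%016llx ibc 0x%4.4x fac_list[0] 0x%016llx\n",
	       (unsigned long long)mach.cpuid, mach.ibc,
	       (unsigned long long)mach.fac_list[0]);
	return 0;
}
#endif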
1502
David Hildenbrand15c97052015-03-19 17:36:43 +01001503static int kvm_s390_get_processor_feat(struct kvm *kvm,
1504 struct kvm_device_attr *attr)
1505{
1506 struct kvm_s390_vm_cpu_feat data;
1507
1508 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1509 KVM_S390_VM_CPU_FEAT_NR_BITS);
1510 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1511 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001512 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1513 data.feat[0],
1514 data.feat[1],
1515 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001516 return 0;
1517}
1518
1519static int kvm_s390_get_machine_feat(struct kvm *kvm,
1520 struct kvm_device_attr *attr)
1521{
1522 struct kvm_s390_vm_cpu_feat data;
1523
1524 bitmap_copy((unsigned long *) data.feat,
1525 kvm_s390_available_cpu_feat,
1526 KVM_S390_VM_CPU_FEAT_NR_BITS);
1527 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1528 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001529 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1530 data.feat[0],
1531 data.feat[1],
1532 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001533 return 0;
1534}
1535
David Hildenbrand0a763c72016-05-18 16:03:47 +02001536static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1537 struct kvm_device_attr *attr)
1538{
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001539 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1540 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1541 return -EFAULT;
1542
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001543 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1544 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1545 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1546 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1547 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1548 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1549 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1550 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1551 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1552 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1553 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1554 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1555 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1556 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1557 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
1558 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1559 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1560 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1561 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1562 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1563 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1564 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1565 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1566 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1567 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1568 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1569 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1570 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1571 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1572 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1573 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1574 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1575 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1576 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1577 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1578 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1579 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1580 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1581 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1582 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1583 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1584 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1585 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1586 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001587 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1588 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1589 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001590 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1591 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1592 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1593 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1594 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001595 VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1596 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1597 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1598 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1599 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001600
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001601 return 0;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001602}
1603
1604static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1605 struct kvm_device_attr *attr)
1606{
1607 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1608 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1609 return -EFAULT;
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001610
1611 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1612 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1613 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1614 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1615 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1616 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
1617 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1618 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1619 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
1620 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1621 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1622 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
1623 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1624 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1625 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
1626 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1627 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1628 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
1629 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1630 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1631 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
1632 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1633 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1634 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
1635 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1636 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1637 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
1638 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1639 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1640 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
1641 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1642 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1643 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
1644 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1645 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1646 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
1647 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1648 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1649 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
1650 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1651 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1652 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
1653 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1654 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001655 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
1656 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1657 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001658 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1659 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1660 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1661 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1662 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001663 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1664 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1665 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1666 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1667 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001668
David Hildenbrand0a763c72016-05-18 16:03:47 +02001669 return 0;
1670}
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001671
Michael Mueller658b6ed2015-02-02 15:49:35 +01001672static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1673{
1674 int ret = -ENXIO;
1675
1676 switch (attr->attr) {
1677 case KVM_S390_VM_CPU_PROCESSOR:
1678 ret = kvm_s390_get_processor(kvm, attr);
1679 break;
1680 case KVM_S390_VM_CPU_MACHINE:
1681 ret = kvm_s390_get_machine(kvm, attr);
1682 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001683 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1684 ret = kvm_s390_get_processor_feat(kvm, attr);
1685 break;
1686 case KVM_S390_VM_CPU_MACHINE_FEAT:
1687 ret = kvm_s390_get_machine_feat(kvm, attr);
1688 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001689 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1690 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1691 break;
1692 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1693 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1694 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001695 }
1696 return ret;
1697}
1698
Dominik Dingelf2061652014-04-09 13:13:00 +02001699static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1700{
1701 int ret;
1702
1703 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001704 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001705 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001706 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001707 case KVM_S390_VM_TOD:
1708 ret = kvm_s390_set_tod(kvm, attr);
1709 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001710 case KVM_S390_VM_CPU_MODEL:
1711 ret = kvm_s390_set_cpu_model(kvm, attr);
1712 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001713 case KVM_S390_VM_CRYPTO:
1714 ret = kvm_s390_vm_set_crypto(kvm, attr);
1715 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001716 case KVM_S390_VM_MIGRATION:
1717 ret = kvm_s390_vm_set_migration(kvm, attr);
1718 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001719 default:
1720 ret = -ENXIO;
1721 break;
1722 }
1723
1724 return ret;
1725}
1726
1727static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1728{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001729 int ret;
1730
1731 switch (attr->group) {
1732 case KVM_S390_VM_MEM_CTRL:
1733 ret = kvm_s390_get_mem_control(kvm, attr);
1734 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001735 case KVM_S390_VM_TOD:
1736 ret = kvm_s390_get_tod(kvm, attr);
1737 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001738 case KVM_S390_VM_CPU_MODEL:
1739 ret = kvm_s390_get_cpu_model(kvm, attr);
1740 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001741 case KVM_S390_VM_MIGRATION:
1742 ret = kvm_s390_vm_get_migration(kvm, attr);
1743 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001744 default:
1745 ret = -ENXIO;
1746 break;
1747 }
1748
1749 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +02001750}
1751
1752static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1753{
1754 int ret;
1755
1756 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001757 case KVM_S390_VM_MEM_CTRL:
1758 switch (attr->attr) {
1759 case KVM_S390_VM_MEM_ENABLE_CMMA:
1760 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +01001761 ret = sclp.has_cmma ? 0 : -ENXIO;
1762 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001763 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001764 ret = 0;
1765 break;
1766 default:
1767 ret = -ENXIO;
1768 break;
1769 }
1770 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001771 case KVM_S390_VM_TOD:
1772 switch (attr->attr) {
1773 case KVM_S390_VM_TOD_LOW:
1774 case KVM_S390_VM_TOD_HIGH:
1775 ret = 0;
1776 break;
1777 default:
1778 ret = -ENXIO;
1779 break;
1780 }
1781 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001782 case KVM_S390_VM_CPU_MODEL:
1783 switch (attr->attr) {
1784 case KVM_S390_VM_CPU_PROCESSOR:
1785 case KVM_S390_VM_CPU_MACHINE:
David Hildenbrand15c97052015-03-19 17:36:43 +01001786 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1787 case KVM_S390_VM_CPU_MACHINE_FEAT:
David Hildenbrand0a763c72016-05-18 16:03:47 +02001788 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001789 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001790 ret = 0;
1791 break;
1792 default:
1793 ret = -ENXIO;
1794 break;
1795 }
1796 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001797 case KVM_S390_VM_CRYPTO:
1798 switch (attr->attr) {
1799 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1800 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1801 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1802 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1803 ret = 0;
1804 break;
Tony Krowiak37940fb2018-09-25 19:16:39 -04001805 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1806 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1807 ret = ap_instructions_available() ? 0 : -ENXIO;
1808 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001809 default:
1810 ret = -ENXIO;
1811 break;
1812 }
1813 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001814 case KVM_S390_VM_MIGRATION:
1815 ret = 0;
1816 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001817 default:
1818 ret = -ENXIO;
1819 break;
1820 }
1821
1822 return ret;
1823}
1824
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001825static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1826{
1827 uint8_t *keys;
1828 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001829 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001830
1831 if (args->flags != 0)
1832 return -EINVAL;
1833
1834 /* Is this guest using storage keys? */
Janosch Frank55531b72018-02-15 16:33:47 +01001835 if (!mm_uses_skeys(current->mm))
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001836 return KVM_S390_GET_SKEYS_NONE;
1837
1838 /* Enforce sane limit on memory allocation */
1839 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1840 return -EINVAL;
1841
Christian Borntraegerc4196212020-11-06 08:34:23 +01001842 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001843 if (!keys)
1844 return -ENOMEM;
1845
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001846 mmap_read_lock(current->mm);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001847 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001848 for (i = 0; i < args->count; i++) {
1849 hva = gfn_to_hva(kvm, args->start_gfn + i);
1850 if (kvm_is_error_hva(hva)) {
1851 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001852 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001853 }
1854
David Hildenbrand154c8c12016-05-09 11:22:34 +02001855 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1856 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001857 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001858 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001859 srcu_read_unlock(&kvm->srcu, srcu_idx);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001860 mmap_read_unlock(current->mm);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001861
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001862 if (!r) {
1863 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1864 sizeof(uint8_t) * args->count);
1865 if (r)
1866 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001867 }
1868
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001869 kvfree(keys);
1870 return r;
1871}
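
/*
 * Illustrative sketch (not part of this file): fetching guest storage
 * keys through the ioctl served by kvm_s390_get_skeys(). Assumes a VM fd
 * and a caller-provided buffer of "count" bytes; note the positive
 * KVM_S390_GET_SKEYS_NONE return when the guest does not use keys.
 */
#if 0	/* userspace example, kept out of the build on purpose */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_skeys(int vm_fd, uint64_t start_gfn, uint64_t count,
		      uint8_t *keys)
{
	struct kvm_s390_skeys args = {
		.start_gfn = start_gfn,
		.count = count,		/* must be 1..KVM_S390_SKEYS_MAX */
		.skeydata_addr = (uint64_t)(uintptr_t)keys,
	};

	return ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
}
#endif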
1872
1873static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1874{
1875 uint8_t *keys;
1876 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001877 int srcu_idx, i, r = 0;
Janosch Frankbd096f62018-07-18 13:40:22 +01001878 bool unlocked;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001879
1880 if (args->flags != 0)
1881 return -EINVAL;
1882
1883 /* Enforce sane limit on memory allocation */
1884 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1885 return -EINVAL;
1886
Christian Borntraegerc4196212020-11-06 08:34:23 +01001887 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL_ACCOUNT);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001888 if (!keys)
1889 return -ENOMEM;
1890
1891 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1892 sizeof(uint8_t) * args->count);
1893 if (r) {
1894 r = -EFAULT;
1895 goto out;
1896 }
1897
1898 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001899 r = s390_enable_skey();
1900 if (r)
1901 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001902
Janosch Frankbd096f62018-07-18 13:40:22 +01001903 i = 0;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001904 mmap_read_lock(current->mm);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001905 srcu_idx = srcu_read_lock(&kvm->srcu);
Janosch Frankbd096f62018-07-18 13:40:22 +01001906 while (i < args->count) {
1907 unlocked = false;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001908 hva = gfn_to_hva(kvm, args->start_gfn + i);
1909 if (kvm_is_error_hva(hva)) {
1910 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001911 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001912 }
1913
1914 /* Lowest order bit is reserved */
1915 if (keys[i] & 0x01) {
1916 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001917 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001918 }
1919
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001920 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Janosch Frankbd096f62018-07-18 13:40:22 +01001921 if (r) {
Peter Xu64019a22020-08-11 18:39:01 -07001922 r = fixup_user_fault(current->mm, hva,
Janosch Frankbd096f62018-07-18 13:40:22 +01001923 FAULT_FLAG_WRITE, &unlocked);
1924 if (r)
1925 break;
1926 }
1927 if (!r)
1928 i++;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001929 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001930 srcu_read_unlock(&kvm->srcu, srcu_idx);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07001931 mmap_read_unlock(current->mm);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001932out:
1933 kvfree(keys);
1934 return r;
1935}
1936
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001937/*
 1938 * Base address and length must be sent at the start of each block; it is
 1939 * therefore cheaper to send some clean data, as long as it is less than the
 1940 * size of two longs.
1941 */
1942#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1943/* for consistency */
1944#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
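
/*
 * Illustration of the trade-off encoded above: on a 64-bit machine a new
 * block costs two longs (base address + length) = 16 bytes of metadata,
 * so inlining a run of up to KVM_S390_MAX_BIT_DISTANCE (16) clean values
 * is never more expensive than starting a fresh block.
 */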
1945
1946/*
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001947 * Similar to gfn_to_memslot, but returns the index of a memslot even when the
 1948 * address falls in a hole. In that case, the index of one of the memslots
 1949 * bordering the hole is returned.
1950 */
1951static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
1952{
1953 int start = 0, end = slots->used_slots;
David Matlack87689272021-08-04 22:28:38 +00001954 int slot = atomic_read(&slots->last_used_slot);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001955 struct kvm_memory_slot *memslots = slots->memslots;
1956
1957 if (gfn >= memslots[slot].base_gfn &&
1958 gfn < memslots[slot].base_gfn + memslots[slot].npages)
1959 return slot;
1960
1961 while (start < end) {
1962 slot = start + (end - start) / 2;
1963
1964 if (gfn >= memslots[slot].base_gfn)
1965 end = slot;
1966 else
1967 start = slot + 1;
1968 }
1969
Sean Christopherson97daa022020-04-07 23:40:59 -07001970 if (start >= slots->used_slots)
1971 return slots->used_slots - 1;
1972
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001973 if (gfn >= memslots[start].base_gfn &&
1974 gfn < memslots[start].base_gfn + memslots[start].npages) {
David Matlack87689272021-08-04 22:28:38 +00001975 atomic_set(&slots->last_used_slot, start);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001976 }
1977
1978 return start;
1979}
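
/*
 * Worked example (illustration only): with memslots kept sorted by
 * base_gfn in descending order, e.g.
 *	memslots[0] = { .base_gfn = 0x1000, .npages = 0x100 }
 *	memslots[1] = { .base_gfn = 0x0,    .npages = 0x100 }
 * a lookup of gfn 0x800 falls into the hole [0x100, 0x1000). The binary
 * search above converges on index 1, i.e. the slot bordering the hole
 * from below, which is what the CMMA scan code needs to continue from.
 */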
1980
1981static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1982 u8 *res, unsigned long bufsize)
1983{
1984 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
1985
1986 args->count = 0;
1987 while (args->count < bufsize) {
1988 hva = gfn_to_hva(kvm, cur_gfn);
1989 /*
1990 * We return an error if the first value was invalid, but we
1991 * return successfully if at least one value was copied.
1992 */
1993 if (kvm_is_error_hva(hva))
1994 return args->count ? 0 : -EFAULT;
1995 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1996 pgstev = 0;
1997 res[args->count++] = (pgstev >> 24) & 0x43;
1998 cur_gfn++;
1999 }
2000
2001 return 0;
2002}
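
/*
 * Note on the value encoding (illustration, derived from the PGSTE bit
 * layout in arch/s390/include/asm/pgtable.h): "(pgstev >> 24) & 0x43"
 * leaves the guest page state in the low byte of each result: bits 0-1
 * hold the usage state (stable, unused, potentially volatile, volatile)
 * and bit 6 holds the NODAT indication. kvm_s390_set_cmma_bits() below
 * accepts values back in the same encoding.
 */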
2003
2004static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
2005 unsigned long cur_gfn)
2006{
2007 int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
2008 struct kvm_memory_slot *ms = slots->memslots + slotidx;
2009 unsigned long ofs = cur_gfn - ms->base_gfn;
2010
2011 if (ms->base_gfn + ms->npages <= cur_gfn) {
2012 slotidx--;
2013 /* If we are above the highest slot, wrap around */
2014 if (slotidx < 0)
2015 slotidx = slots->used_slots - 1;
2016
2017 ms = slots->memslots + slotidx;
2018 ofs = 0;
2019 }
2020 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
2021 while ((slotidx > 0) && (ofs >= ms->npages)) {
2022 slotidx--;
2023 ms = slots->memslots + slotidx;
2024 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
2025 }
2026 return ms->base_gfn + ofs;
2027}
2028
2029static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2030 u8 *res, unsigned long bufsize)
2031{
2032 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2033 struct kvm_memslots *slots = kvm_memslots(kvm);
2034 struct kvm_memory_slot *ms;
2035
Sean Christopherson0774a962020-03-20 13:55:40 -07002036 if (unlikely(!slots->used_slots))
2037 return 0;
2038
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002039 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2040 ms = gfn_to_memslot(kvm, cur_gfn);
2041 args->count = 0;
2042 args->start_gfn = cur_gfn;
2043 if (!ms)
2044 return 0;
2045 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2046 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
2047
2048 while (args->count < bufsize) {
2049 hva = gfn_to_hva(kvm, cur_gfn);
2050 if (kvm_is_error_hva(hva))
2051 return 0;
2052 /* Decrement only if we actually flipped the bit to 0 */
2053 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2054 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2055 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2056 pgstev = 0;
2057 /* Save the value */
2058 res[args->count++] = (pgstev >> 24) & 0x43;
2059 /* If the next bit is too far away, stop. */
2060 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2061 return 0;
2062 /* If we reached the previous "next", find the next one */
2063 if (cur_gfn == next_gfn)
2064 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2065 /* Reached the end of memory or of the buffer, stop */
2066 if ((next_gfn >= mem_end) ||
2067 (next_gfn - args->start_gfn >= bufsize))
2068 return 0;
2069 cur_gfn++;
2070 /* Reached the end of the current memslot, take the next one. */
2071 if (cur_gfn - ms->base_gfn >= ms->npages) {
2072 ms = gfn_to_memslot(kvm, cur_gfn);
2073 if (!ms)
2074 return 0;
2075 }
2076 }
2077 return 0;
2078}
2079
2080/*
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002081 * This function searches for the next page with dirty CMMA attributes, and
 2082 * saves the attributes in the buffer, up to the end of the buffer or until
 2083 * a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 2084 * no trailing clean bytes are saved.
 2085 * If no dirty bits were found, or if CMMA was not enabled or used, the
 2086 * output buffer will indicate a length of 0.
2087 */
2088static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2089 struct kvm_s390_cmma_log *args)
2090{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002091 unsigned long bufsize;
2092 int srcu_idx, peek, ret;
2093 u8 *values;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002094
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002095 if (!kvm->arch.use_cmma)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002096 return -ENXIO;
2097 /* Invalid/unsupported flags were specified */
2098 if (args->flags & ~KVM_S390_CMMA_PEEK)
2099 return -EINVAL;
 2100 /* Without the peek flag, dirty bits can only be queried in migration mode */
2101 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002102 if (!peek && !kvm->arch.migration_mode)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002103 return -EINVAL;
2104 /* CMMA is disabled or was not used, or the buffer has length zero */
2105 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002106 if (!bufsize || !kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002107 memset(args, 0, sizeof(*args));
2108 return 0;
2109 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002110 /* We are not peeking, and there are no dirty pages */
2111 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2112 memset(args, 0, sizeof(*args));
2113 return 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002114 }
2115
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002116 values = vmalloc(bufsize);
2117 if (!values)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002118 return -ENOMEM;
2119
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002120 mmap_read_lock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002121 srcu_idx = srcu_read_lock(&kvm->srcu);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002122 if (peek)
2123 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2124 else
2125 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002126 srcu_read_unlock(&kvm->srcu, srcu_idx);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002127 mmap_read_unlock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002128
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002129 if (kvm->arch.migration_mode)
2130 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2131 else
2132 args->remaining = 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002133
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002134 if (copy_to_user((void __user *)args->values, values, args->count))
2135 ret = -EFAULT;
2136
2137 vfree(values);
2138 return ret;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002139}
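
/*
 * Illustrative sketch (not part of this file): peeking at CMMA values
 * from userspace via the ioctl that lands in kvm_s390_get_cmma_bits().
 * Minimal example under the stated assumptions: a VM fd, CMMA enabled,
 * and a buffer of "count" bytes. With KVM_S390_CMMA_PEEK no migration
 * mode is required.
 */
#if 0	/* userspace example, kept out of the build on purpose */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static long peek_cmma(int vm_fd, uint64_t start_gfn, uint32_t count,
		      uint8_t *values)
{
	struct kvm_s390_cmma_log args = {
		.start_gfn = start_gfn,
		.count = count,		/* capped at KVM_S390_CMMA_SIZE_MAX */
		.flags = KVM_S390_CMMA_PEEK,
		.values = (uint64_t)(uintptr_t)values,
	};

	if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &args) < 0)
		return -1;
	return args.count;	/* number of values actually stored */
}
#endif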
2140
2141/*
2142 * This function sets the CMMA attributes for the given pages. If the input
 2143 * buffer has zero length, no action is taken; otherwise the attributes are
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002144 * set and the mm->context.uses_cmm flag is set.
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002145 */
2146static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2147 const struct kvm_s390_cmma_log *args)
2148{
2149 unsigned long hva, mask, pgstev, i;
2150 uint8_t *bits;
2151 int srcu_idx, r = 0;
2152
2153 mask = args->mask;
2154
2155 if (!kvm->arch.use_cmma)
2156 return -ENXIO;
2157 /* invalid/unsupported flags */
2158 if (args->flags != 0)
2159 return -EINVAL;
2160 /* Enforce sane limit on memory allocation */
2161 if (args->count > KVM_S390_CMMA_SIZE_MAX)
2162 return -EINVAL;
2163 /* Nothing to do */
2164 if (args->count == 0)
2165 return 0;
2166
Kees Cook42bc47b2018-06-12 14:27:11 -07002167 bits = vmalloc(array_size(sizeof(*bits), args->count));
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002168 if (!bits)
2169 return -ENOMEM;
2170
2171 r = copy_from_user(bits, (void __user *)args->values, args->count);
2172 if (r) {
2173 r = -EFAULT;
2174 goto out;
2175 }
2176
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002177 mmap_read_lock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002178 srcu_idx = srcu_read_lock(&kvm->srcu);
2179 for (i = 0; i < args->count; i++) {
2180 hva = gfn_to_hva(kvm, args->start_gfn + i);
2181 if (kvm_is_error_hva(hva)) {
2182 r = -EFAULT;
2183 break;
2184 }
2185
2186 pgstev = bits[i];
2187 pgstev = pgstev << 24;
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002188 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002189 set_pgste_bits(kvm->mm, hva, mask, pgstev);
2190 }
2191 srcu_read_unlock(&kvm->srcu, srcu_idx);
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002192 mmap_read_unlock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002193
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002194 if (!kvm->mm->context.uses_cmm) {
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002195 mmap_write_lock(kvm->mm);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002196 kvm->mm->context.uses_cmm = 1;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002197 mmap_write_unlock(kvm->mm);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002198 }
2199out:
2200 vfree(bits);
2201 return r;
2202}
2203
Janosch Frank29b40f12019-09-30 04:19:18 -04002204static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
2205{
2206 struct kvm_vcpu *vcpu;
2207 u16 rc, rrc;
2208 int ret = 0;
Marc Zyngier46808a42021-11-16 16:04:02 +00002209 unsigned long i;
Janosch Frank29b40f12019-09-30 04:19:18 -04002210
2211 /*
2212 * We ignore failures and try to destroy as many CPUs as possible.
2213 * At the same time we must not free the assigned resources when
 2214 * this fails, as the ultravisor still has access to that memory.
2215 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
2216 * behind.
2217 * We want to return the first failure rc and rrc, though.
2218 */
2219 kvm_for_each_vcpu(i, vcpu, kvm) {
2220 mutex_lock(&vcpu->mutex);
2221 if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
2222 *rcp = rc;
2223 *rrcp = rrc;
2224 ret = -EIO;
2225 }
2226 mutex_unlock(&vcpu->mutex);
2227 }
2228 return ret;
2229}
2230
2231static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2232{
Marc Zyngier46808a42021-11-16 16:04:02 +00002233 unsigned long i;
2234 int r = 0;
Janosch Frank29b40f12019-09-30 04:19:18 -04002235 u16 dummy;
2236
2237 struct kvm_vcpu *vcpu;
2238
2239 kvm_for_each_vcpu(i, vcpu, kvm) {
2240 mutex_lock(&vcpu->mutex);
2241 r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2242 mutex_unlock(&vcpu->mutex);
2243 if (r)
2244 break;
2245 }
2246 if (r)
2247 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2248 return r;
2249}
2250
2251static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2252{
2253 int r = 0;
2254 u16 dummy;
2255 void __user *argp = (void __user *)cmd->data;
2256
2257 switch (cmd->cmd) {
2258 case KVM_PV_ENABLE: {
2259 r = -EINVAL;
2260 if (kvm_s390_pv_is_protected(kvm))
2261 break;
2262
2263 /*
2264 * FMT 4 SIE needs esca. As we never switch back to bsca from
 2265 * esca, we need no cleanup in the error cases below.
2266 */
2267 r = sca_switch_to_extended(kvm);
2268 if (r)
2269 break;
2270
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002271 mmap_write_lock(current->mm);
Janosch Frankfa0c5ea2019-07-16 13:08:37 +02002272 r = gmap_mark_unmergeable();
Michel Lespinassed8ed45c2020-06-08 21:33:25 -07002273 mmap_write_unlock(current->mm);
Janosch Frankfa0c5ea2019-07-16 13:08:37 +02002274 if (r)
2275 break;
2276
Janosch Frank29b40f12019-09-30 04:19:18 -04002277 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2278 if (r)
2279 break;
2280
2281 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2282 if (r)
2283 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
Christian Borntraeger0890dde2020-02-03 09:13:37 +01002284
2285 /* we need to block service interrupts from now on */
2286 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
Janosch Frank29b40f12019-09-30 04:19:18 -04002287 break;
2288 }
2289 case KVM_PV_DISABLE: {
2290 r = -EINVAL;
2291 if (!kvm_s390_pv_is_protected(kvm))
2292 break;
2293
2294 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2295 /*
 2296 * If a CPU could not be destroyed, destroying the VM will also
 2297 * fail. There is no point in trying to destroy it. Instead return
 2298 * the rc and rrc from the first CPU whose destruction failed.
2299 */
2300 if (r)
2301 break;
2302 r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
Christian Borntraeger0890dde2020-02-03 09:13:37 +01002303
2304 /* no need to block service interrupts any more */
2305 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
Janosch Frank29b40f12019-09-30 04:19:18 -04002306 break;
2307 }
2308 case KVM_PV_SET_SEC_PARMS: {
2309 struct kvm_s390_pv_sec_parm parms = {};
2310 void *hdr;
2311
2312 r = -EINVAL;
2313 if (!kvm_s390_pv_is_protected(kvm))
2314 break;
2315
2316 r = -EFAULT;
2317 if (copy_from_user(&parms, argp, sizeof(parms)))
2318 break;
2319
2320 /* Currently restricted to 8KB */
2321 r = -EINVAL;
2322 if (parms.length > PAGE_SIZE * 2)
2323 break;
2324
2325 r = -ENOMEM;
2326 hdr = vmalloc(parms.length);
2327 if (!hdr)
2328 break;
2329
2330 r = -EFAULT;
2331 if (!copy_from_user(hdr, (void __user *)parms.origin,
2332 parms.length))
2333 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2334 &cmd->rc, &cmd->rrc);
2335
2336 vfree(hdr);
2337 break;
2338 }
2339 case KVM_PV_UNPACK: {
2340 struct kvm_s390_pv_unp unp = {};
2341
2342 r = -EINVAL;
Janosch Frank1ed576a2020-10-20 06:12:07 -04002343 if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
Janosch Frank29b40f12019-09-30 04:19:18 -04002344 break;
2345
2346 r = -EFAULT;
2347 if (copy_from_user(&unp, argp, sizeof(unp)))
2348 break;
2349
2350 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2351 &cmd->rc, &cmd->rrc);
2352 break;
2353 }
2354 case KVM_PV_VERIFY: {
2355 r = -EINVAL;
2356 if (!kvm_s390_pv_is_protected(kvm))
2357 break;
2358
2359 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2360 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2361 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2362 cmd->rrc);
2363 break;
2364 }
Janosch Franke0d27732019-05-09 13:07:21 +02002365 case KVM_PV_PREP_RESET: {
2366 r = -EINVAL;
2367 if (!kvm_s390_pv_is_protected(kvm))
2368 break;
2369
2370 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2371 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2372 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2373 cmd->rc, cmd->rrc);
2374 break;
2375 }
2376 case KVM_PV_UNSHARE_ALL: {
2377 r = -EINVAL;
2378 if (!kvm_s390_pv_is_protected(kvm))
2379 break;
2380
2381 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2382 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2383 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2384 cmd->rc, cmd->rrc);
2385 break;
2386 }
Janosch Frank29b40f12019-09-30 04:19:18 -04002387 default:
2388 r = -ENOTTY;
2389 }
2390 return r;
2391}
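
/*
 * Illustrative sketch (not part of this file): the order in which
 * userspace would typically drive the PV commands handled above when
 * starting a protected guest. Hedged outline; error handling and the
 * secure-execution header/image contents are deliberately omitted.
 */
#if 0	/* userspace example, kept out of the build on purpose */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int pv_enable(int vm_fd)
{
	struct kvm_pv_cmd cmd;

	/* 1. Donate memory and convert all created vCPUs to protected */
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd = KVM_PV_ENABLE;
	if (ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd) < 0)
		return -1;
	/*
	 * Steps 2..4 would follow: KVM_PV_SET_SEC_PARMS with the SE
	 * header, KVM_PV_UNPACK per image chunk, then KVM_PV_VERIFY.
	 * cmd.rc/cmd.rrc carry the ultravisor return codes throughout.
	 */
	return 0;
}
#endif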
2392
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002393long kvm_arch_vm_ioctl(struct file *filp,
2394 unsigned int ioctl, unsigned long arg)
2395{
2396 struct kvm *kvm = filp->private_data;
2397 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +02002398 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002399 int r;
2400
2401 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01002402 case KVM_S390_INTERRUPT: {
2403 struct kvm_s390_interrupt s390int;
2404
2405 r = -EFAULT;
2406 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2407 break;
2408 r = kvm_s390_inject_vm(kvm, &s390int);
2409 break;
2410 }
Cornelia Huck84223592013-07-15 13:36:01 +02002411 case KVM_CREATE_IRQCHIP: {
2412 struct kvm_irq_routing_entry routing;
2413
2414 r = -EINVAL;
2415 if (kvm->arch.use_irqchip) {
2416 /* Set up dummy routing. */
2417 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -04002418 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +02002419 }
2420 break;
2421 }
Dominik Dingelf2061652014-04-09 13:13:00 +02002422 case KVM_SET_DEVICE_ATTR: {
2423 r = -EFAULT;
2424 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2425 break;
2426 r = kvm_s390_vm_set_attr(kvm, &attr);
2427 break;
2428 }
2429 case KVM_GET_DEVICE_ATTR: {
2430 r = -EFAULT;
2431 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2432 break;
2433 r = kvm_s390_vm_get_attr(kvm, &attr);
2434 break;
2435 }
2436 case KVM_HAS_DEVICE_ATTR: {
2437 r = -EFAULT;
2438 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2439 break;
2440 r = kvm_s390_vm_has_attr(kvm, &attr);
2441 break;
2442 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -04002443 case KVM_S390_GET_SKEYS: {
2444 struct kvm_s390_skeys args;
2445
2446 r = -EFAULT;
2447 if (copy_from_user(&args, argp,
2448 sizeof(struct kvm_s390_skeys)))
2449 break;
2450 r = kvm_s390_get_skeys(kvm, &args);
2451 break;
2452 }
2453 case KVM_S390_SET_SKEYS: {
2454 struct kvm_s390_skeys args;
2455
2456 r = -EFAULT;
2457 if (copy_from_user(&args, argp,
2458 sizeof(struct kvm_s390_skeys)))
2459 break;
2460 r = kvm_s390_set_skeys(kvm, &args);
2461 break;
2462 }
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002463 case KVM_S390_GET_CMMA_BITS: {
2464 struct kvm_s390_cmma_log args;
2465
2466 r = -EFAULT;
2467 if (copy_from_user(&args, argp, sizeof(args)))
2468 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002469 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002470 r = kvm_s390_get_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002471 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002472 if (!r) {
2473 r = copy_to_user(argp, &args, sizeof(args));
2474 if (r)
2475 r = -EFAULT;
2476 }
2477 break;
2478 }
2479 case KVM_S390_SET_CMMA_BITS: {
2480 struct kvm_s390_cmma_log args;
2481
2482 r = -EFAULT;
2483 if (copy_from_user(&args, argp, sizeof(args)))
2484 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002485 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002486 r = kvm_s390_set_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002487 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002488 break;
2489 }
Janosch Frank29b40f12019-09-30 04:19:18 -04002490 case KVM_S390_PV_COMMAND: {
2491 struct kvm_pv_cmd args;
2492
Eric Farman67cf68b2021-10-08 22:31:12 +02002493 /* protvirt implies user-controlled cpu state */
2494 kvm_s390_set_user_cpu_state_ctrl(kvm);
Janosch Frank29b40f12019-09-30 04:19:18 -04002495 r = 0;
2496 if (!is_prot_virt_host()) {
2497 r = -EINVAL;
2498 break;
2499 }
2500 if (copy_from_user(&args, argp, sizeof(args))) {
2501 r = -EFAULT;
2502 break;
2503 }
2504 if (args.flags) {
2505 r = -EINVAL;
2506 break;
2507 }
2508 mutex_lock(&kvm->lock);
2509 r = kvm_s390_handle_pv(kvm, &args);
2510 mutex_unlock(&kvm->lock);
2511 if (copy_to_user(argp, &args, sizeof(args))) {
2512 r = -EFAULT;
2513 break;
2514 }
2515 break;
2516 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002517 default:
Avi Kivity367e1312009-08-26 14:57:07 +03002518 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002519 }
2520
2521 return r;
2522}
2523
Tony Krowiak45c9b472015-01-13 11:33:26 -05002524static int kvm_s390_apxa_installed(void)
2525{
Tony Krowiake585b242018-09-25 19:16:18 -04002526 struct ap_config_info info;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002527
Tony Krowiake585b242018-09-25 19:16:18 -04002528 if (ap_instructions_available()) {
2529 if (ap_qci(&info) == 0)
2530 return info.apxa;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002531 }
2532
2533 return 0;
2534}
2535
Tony Krowiake585b242018-09-25 19:16:18 -04002536/*
2537 * The format of the crypto control block (CRYCB) is specified in the 3 low
2538 * order bits of the CRYCB designation (CRYCBD) field as follows:
 2539 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 2540 * AP extended addressing (APXA) facility is installed.
 2541 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 2542 * Format 2: Both the APXA and MSAX3 facilities are installed.
2543 */
Tony Krowiak45c9b472015-01-13 11:33:26 -05002544static void kvm_s390_set_crycb_format(struct kvm *kvm)
2545{
2546 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2547
Tony Krowiake585b242018-09-25 19:16:18 -04002548 /* Clear the CRYCB format bits - i.e., set format 0 by default */
2549 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2550
2551 /* Check whether MSAX3 is installed */
2552 if (!test_kvm_facility(kvm, 76))
2553 return;
2554
Tony Krowiak45c9b472015-01-13 11:33:26 -05002555 if (kvm_s390_apxa_installed())
2556 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2557 else
2558 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2559}
2560
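/*
 * Worked example (illustrative values): with the CRYCB at 0x3ff800 on a
 * machine that has both MSAX3 and APXA, the code above produces
 *	crycbd = 0x003ff800 | CRYCB_FORMAT2
 * The CRYCB is aligned, so the low-order bits of its address are zero
 * and can carry the format indication in the designation word.
 */
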
Tony Krowiak86956e72021-08-23 17:20:47 -04002561/*
2562 * kvm_arch_crypto_set_masks
2563 *
2564 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
2565 * to be set.
2566 * @apm: the mask identifying the accessible AP adapters
2567 * @aqm: the mask identifying the accessible AP domains
2568 * @adm: the mask identifying the accessible AP control domains
2569 *
2570 * Set the masks that identify the adapters, domains and control domains to
2571 * which the KVM guest is granted access.
2572 *
2573 * Note: The kvm->lock mutex must be locked by the caller before invoking this
2574 * function.
2575 */
Pierre Morel0e237e42018-10-05 10:31:09 +02002576void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
2577 unsigned long *aqm, unsigned long *adm)
2578{
2579 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2580
Pierre Morel0e237e42018-10-05 10:31:09 +02002581 kvm_s390_vcpu_block_all(kvm);
2582
2583 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2584 case CRYCB_FORMAT2: /* APCB1 use 256 bits */
2585 memcpy(crycb->apcb1.apm, apm, 32);
2586 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
2587 apm[0], apm[1], apm[2], apm[3]);
2588 memcpy(crycb->apcb1.aqm, aqm, 32);
2589 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
2590 aqm[0], aqm[1], aqm[2], aqm[3]);
2591 memcpy(crycb->apcb1.adm, adm, 32);
2592 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
2593 adm[0], adm[1], adm[2], adm[3]);
2594 break;
2595 case CRYCB_FORMAT1:
2596	case CRYCB_FORMAT0: /* fall through - both formats use APCB0 */
2597 memcpy(crycb->apcb0.apm, apm, 8);
2598 memcpy(crycb->apcb0.aqm, aqm, 2);
2599 memcpy(crycb->apcb0.adm, adm, 2);
2600 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
2601 apm[0], *((unsigned short *)aqm),
2602 *((unsigned short *)adm));
2603 break;
2604	default:	/* Cannot happen */
2605 break;
2606 }
2607
2608 /* recreate the shadow crycb for each vcpu */
2609 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2610 kvm_s390_vcpu_unblock_all(kvm);
Pierre Morel0e237e42018-10-05 10:31:09 +02002611}
2612EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
2613
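/*
 * Illustrative sketch (hypothetical caller, loosely modeled on what a
 * driver like vfio_ap does): granting a guest access to AP adapter 3
 * and (control) domain 7. The bitmaps use the inverted, MSB-first bit
 * numbering the APCB expects, hence set_bit_inv(); kvm->lock is held as
 * required by the comment above.
 */
#if 0
static void example_grant_ap_queue(struct kvm *kvm)
{
	DECLARE_BITMAP(apm, 256) = { 0 };
	DECLARE_BITMAP(aqm, 256) = { 0 };
	DECLARE_BITMAP(adm, 256) = { 0 };

	set_bit_inv(3, apm);	/* adapter (card) 3 */
	set_bit_inv(7, aqm);	/* usage domain 7 */
	set_bit_inv(7, adm);	/* control domain 7 */

	mutex_lock(&kvm->lock);
	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);
	mutex_unlock(&kvm->lock);
}
#endif
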
Tony Krowiak86956e72021-08-23 17:20:47 -04002614/*
2615 * kvm_arch_crypto_clear_masks
2616 *
2617 * @kvm: pointer to the target guest's KVM struct containing the crypto masks
2618 * to be cleared.
2619 *
2620 * Clear the masks that identify the adapters, domains and control domains to
2621 * which the KVM guest is granted access.
2622 *
2623 * Note: The kvm->lock mutex must be locked by the caller before invoking this
2624 * function.
2625 */
Tony Krowiak421045982018-09-25 19:16:25 -04002626void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2627{
Tony Krowiak421045982018-09-25 19:16:25 -04002628 kvm_s390_vcpu_block_all(kvm);
2629
2630 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2631 sizeof(kvm->arch.crypto.crycb->apcb0));
2632 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2633 sizeof(kvm->arch.crypto.crycb->apcb1));
2634
Pierre Morel0e237e42018-10-05 10:31:09 +02002635 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
Pierre Morel6cc571b2018-09-25 19:16:30 -04002636 /* recreate the shadow crycb for each vcpu */
2637 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
Tony Krowiak421045982018-09-25 19:16:25 -04002638 kvm_s390_vcpu_unblock_all(kvm);
Tony Krowiak421045982018-09-25 19:16:25 -04002639}
2640EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2641
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002642static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002643{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002644 struct cpuid cpuid;
2645
2646 get_cpu_id(&cpuid);
2647 cpuid.version = 0xff;
2648 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002649}
2650
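/*
 * Worked example (made-up ident/machine values): a host cpuid of
 * 0x00abcdef39060000 is reported to guests as 0xffabcdef39060000; the
 * forced 0xff version byte is the conventional marker that the CPU id
 * belongs to a virtual CPU rather than to the real machine.
 */
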
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002651static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002652{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002653 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002654 kvm_s390_set_crycb_format(kvm);
Tony Krowiak1e753732021-08-23 17:20:46 -04002655 init_rwsem(&kvm->arch.crypto.pqap_hook_rwsem);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002656
Tony Krowiake585b242018-09-25 19:16:18 -04002657 if (!test_kvm_facility(kvm, 76))
2658 return;
2659
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002660 /* Enable AES/DEA protected key functions by default */
2661 kvm->arch.crypto.aes_kw = 1;
2662 kvm->arch.crypto.dea_kw = 1;
2663 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2664 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2665 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2666 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002667}
2668
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002669static void sca_dispose(struct kvm *kvm)
2670{
2671 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002672 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002673 else
2674 free_page((unsigned long)(kvm->arch.sca));
2675 kvm->arch.sca = NULL;
2676}
2677
Carsten Ottee08b9632012-01-04 10:25:20 +01002678int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002679{
Christian Borntraegerc4196212020-11-06 08:34:23 +01002680 gfp_t alloc_flags = GFP_KERNEL_ACCOUNT;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002681 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002682 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002683 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002684
Carsten Ottee08b9632012-01-04 10:25:20 +01002685 rc = -EINVAL;
2686#ifdef CONFIG_KVM_S390_UCONTROL
2687 if (type & ~KVM_VM_S390_UCONTROL)
2688 goto out_err;
2689 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2690 goto out_err;
2691#else
2692 if (type)
2693 goto out_err;
2694#endif
2695
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002696 rc = s390_enable_sie();
2697 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002698 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002699
Carsten Otteb2904112011-10-18 12:27:13 +02002700 rc = -ENOMEM;
2701
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002702 if (!sclp.has_64bscao)
2703 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002704 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002705 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002706 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002707 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002708 goto out_err;
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002709 mutex_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002710 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002711 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002712 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002713 kvm->arch.sca = (struct bsca_block *)
2714 ((char *) kvm->arch.sca + sca_offset);
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002715 mutex_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002716
2717 sprintf(debug_name, "kvm-%u", current->pid);
2718
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002719 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002720 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002721 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002722
Michael Mueller19114be2017-05-30 14:26:02 +02002723 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002724 kvm->arch.sie_page2 =
Christian Borntraegerc4196212020-11-06 08:34:23 +01002725 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL_ACCOUNT | GFP_DMA);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002726 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002727 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002728
Michael Mueller25c84db2019-01-31 09:52:41 +01002729 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002730 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002731
2732 for (i = 0; i < kvm_s390_fac_size(); i++) {
Sven Schnelle17e89e12021-05-05 22:01:10 +02002733 kvm->arch.model.fac_mask[i] = stfle_fac_list[i] &
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002734 (kvm_s390_fac_base[i] |
2735 kvm_s390_fac_ext[i]);
Sven Schnelle17e89e12021-05-05 22:01:10 +02002736 kvm->arch.model.fac_list[i] = stfle_fac_list[i] &
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002737 kvm_s390_fac_base[i];
2738 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002739 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002740
David Hildenbrand19352222017-08-29 16:31:08 +02002741	/* we are always in czam mode - even on pre-z14 machines */
2742 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2743 set_kvm_facility(kvm->arch.model.fac_list, 138);
2744 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002745 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2746 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002747 if (MACHINE_HAS_TLB_GUEST) {
2748 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2749 set_kvm_facility(kvm->arch.model.fac_list, 147);
2750 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002751
Pierre Morel05f31e32019-05-21 17:34:37 +02002752 if (css_general_characteristics.aiv && test_facility(65))
2753 set_kvm_facility(kvm->arch.model.fac_mask, 65);
2754
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002755 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002756 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002757
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002758 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002759
Fei Li51978392017-02-17 17:06:26 +08002760 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002761 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002762 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2763 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002764 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002765 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002766
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002767 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002768 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002769
Carsten Ottee08b9632012-01-04 10:25:20 +01002770 if (type & KVM_VM_S390_UCONTROL) {
2771 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002772 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002773 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002774 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002775 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002776 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002777 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002778 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002779 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002780 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002781 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002782 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002783 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002784 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002785
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002786 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002787 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002788 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002789 kvm_s390_vsie_init(kvm);
Michael Muellercc674ef2020-02-27 10:10:31 +01002790 if (use_gisa)
2791 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002792 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002793
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002794 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002795out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002796 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002797 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002798 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002799 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002800 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002801}
2802
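/*
 * Worked example for the sca_offset staggering in kvm_arch_init_vm()
 * above: consecutive VM creations place their basic SCA at offsets
 * 0x10, 0x20, 0x30, ... within their respective pages, wrapping to 0
 * once a bsca_block would no longer fit in the page. Presumably this
 * spreads the hot SCA fields of different guests over different cache
 * lines instead of letting them all collide at offset 0.
 */
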
Christian Borntraegerd329c032008-11-26 14:50:27 +01002803void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2804{
Janosch Frank29b40f12019-09-30 04:19:18 -04002805 u16 rc, rrc;
2806
Christian Borntraegerd329c032008-11-26 14:50:27 +01002807 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002808 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002809 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002810 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002811 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002812 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002813
2814 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002815 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002816
Dominik Dingele6db1d62015-05-07 15:41:57 +02002817 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002818 kvm_s390_vcpu_unsetup_cmma(vcpu);
Janosch Frank29b40f12019-09-30 04:19:18 -04002819	/* We cannot hold the vcpu mutex here; we are already dying */
2820 if (kvm_s390_pv_cpu_get_handle(vcpu))
2821 kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002822 free_page((unsigned long)(vcpu->arch.sie_block));
Christian Borntraegerd329c032008-11-26 14:50:27 +01002823}
2824
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002825void kvm_arch_destroy_vm(struct kvm *kvm)
2826{
Janosch Frank29b40f12019-09-30 04:19:18 -04002827 u16 rc, rrc;
2828
Marc Zyngier27592ae2021-11-16 16:03:57 +00002829 kvm_destroy_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002830 sca_dispose(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002831 kvm_s390_gisa_destroy(kvm);
Janosch Frank29b40f12019-09-30 04:19:18 -04002832 /*
2833 * We are already at the end of life and kvm->lock is not taken.
2834 * This is ok as the file descriptor is closed by now and nobody
2835 * can mess with the pv state. To avoid lockdep_assert_held from
2836 * complaining we do not use kvm_s390_pv_is_protected.
2837 */
2838 if (kvm_s390_pv_get_handle(kvm))
2839 kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
2840 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002841 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002842 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002843 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002844 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002845 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002846 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002847 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002848}
2849
2850/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002851static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2852{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002853 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002854 if (!vcpu->arch.gmap)
2855 return -ENOMEM;
2856 vcpu->arch.gmap->private = vcpu->kvm;
2857
2858 return 0;
2859}
2860
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002861static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2862{
David Hildenbranda6940672016-08-08 22:39:32 +02002863 if (!kvm_s390_use_sca_entries())
2864 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002865 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002866 if (vcpu->kvm->arch.use_esca) {
2867 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002868
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002869 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002870 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002871 } else {
2872 struct bsca_block *sca = vcpu->kvm->arch.sca;
2873
2874 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002875 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002876 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002877 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002878}
2879
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002880static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002881{
David Hildenbranda6940672016-08-08 22:39:32 +02002882 if (!kvm_s390_use_sca_entries()) {
2883 struct bsca_block *sca = vcpu->kvm->arch.sca;
2884
2885 /* we still need the basic sca for the ipte control */
2886 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2887 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002888 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002889 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002890 read_lock(&vcpu->kvm->arch.sca_lock);
2891 if (vcpu->kvm->arch.use_esca) {
2892 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002893
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002894 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002895 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2896 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002897 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002898 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002899 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002900 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002901
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002902 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002903 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2904 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002905 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002906 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002907 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002908}
2909
2910/* Basic SCA to Extended SCA data copy routines */
2911static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2912{
2913 d->sda = s->sda;
2914 d->sigp_ctrl.c = s->sigp_ctrl.c;
2915 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2916}
2917
2918static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2919{
2920 int i;
2921
2922 d->ipte_control = s->ipte_control;
2923 d->mcn[0] = s->mcn;
2924 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2925 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2926}
2927
2928static int sca_switch_to_extended(struct kvm *kvm)
2929{
2930 struct bsca_block *old_sca = kvm->arch.sca;
2931 struct esca_block *new_sca;
2932 struct kvm_vcpu *vcpu;
Marc Zyngier46808a42021-11-16 16:04:02 +00002933 unsigned long vcpu_idx;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002934 u32 scaol, scaoh;
2935
Janosch Frank29b40f12019-09-30 04:19:18 -04002936 if (kvm->arch.use_esca)
2937 return 0;
2938
Christian Borntraegerc4196212020-11-06 08:34:23 +01002939 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL_ACCOUNT | __GFP_ZERO);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002940 if (!new_sca)
2941 return -ENOMEM;
2942
2943 scaoh = (u32)((u64)(new_sca) >> 32);
2944 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2945
2946 kvm_s390_vcpu_block_all(kvm);
2947 write_lock(&kvm->arch.sca_lock);
2948
2949 sca_copy_b_to_e(new_sca, old_sca);
2950
2951 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2952 vcpu->arch.sie_block->scaoh = scaoh;
2953 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002954 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002955 }
2956 kvm->arch.sca = new_sca;
2957 kvm->arch.use_esca = 1;
2958
2959 write_unlock(&kvm->arch.sca_lock);
2960 kvm_s390_vcpu_unblock_all(kvm);
2961
2962 free_page((unsigned long)old_sca);
2963
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002964 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2965 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002966 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002967}
2968
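/*
 * Worked example for the scaoh/scaol split above: an ESCA allocated at
 * 0x0000000123456000 is announced to every vcpu as
 *	scaoh = 0x00000001	(bits 63:32 of the origin)
 *	scaol = 0x23456000	(bits 31:0, low 6 bits masked off)
 * All vcpus are blocked and sca_lock is held for writing during the
 * switch, so no vcpu can reenter SIE with a stale SCA origin.
 */
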
2969static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2970{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002971 int rc;
2972
David Hildenbranda6940672016-08-08 22:39:32 +02002973 if (!kvm_s390_use_sca_entries()) {
2974 if (id < KVM_MAX_VCPUS)
2975 return true;
2976 return false;
2977 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002978 if (id < KVM_S390_BSCA_CPU_SLOTS)
2979 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002980 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002981 return false;
2982
2983 mutex_lock(&kvm->lock);
2984 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2985 mutex_unlock(&kvm->lock);
2986
2987 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002988}
2989
David Hildenbranddb0758b2016-02-15 09:42:25 +01002990/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2991static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2992{
2993 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002994 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002995 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002996 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002997}
2998
2999/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3000static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3001{
3002 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01003003 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003004 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3005 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003006 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003007}
3008
3009/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3010static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3011{
3012 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
3013 vcpu->arch.cputm_enabled = true;
3014 __start_cpu_timer_accounting(vcpu);
3015}
3016
3017/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
3018static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3019{
3020 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
3021 __stop_cpu_timer_accounting(vcpu);
3022 vcpu->arch.cputm_enabled = false;
3023}
3024
3025static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3026{
3027 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3028 __enable_cpu_timer_accounting(vcpu);
3029 preempt_enable();
3030}
3031
3032static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
3033{
3034 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3035 __disable_cpu_timer_accounting(vcpu);
3036 preempt_enable();
3037}
3038
David Hildenbrand4287f242016-02-15 09:40:12 +01003039/* set the cpu timer - may only be called from the VCPU thread itself */
3040void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3041{
David Hildenbranddb0758b2016-02-15 09:42:25 +01003042 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01003043 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003044 if (vcpu->arch.cputm_enabled)
3045 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01003046 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003047 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003048 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01003049}
3050
David Hildenbranddb0758b2016-02-15 09:42:25 +01003051/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01003052__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3053{
David Hildenbrand9c23a132016-02-17 21:53:33 +01003054 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01003055 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01003056
3057 if (unlikely(!vcpu->arch.cputm_enabled))
3058 return vcpu->arch.sie_block->cputm;
3059
David Hildenbrand9c23a132016-02-17 21:53:33 +01003060 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3061 do {
3062 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3063 /*
3064 * If the writer would ever execute a read in the critical
3065 * section, e.g. in irq context, we have a deadlock.
3066 */
3067 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3068 value = vcpu->arch.sie_block->cputm;
3069 /* if cputm_start is 0, accounting is being started/stopped */
3070 if (likely(vcpu->arch.cputm_start))
3071 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3072 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3073 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003074 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01003075}
3076
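/*
 * Worked example for the reader above: with sie_block->cputm = 1000 TOD
 * units and accounting started at time T, a reader at T + 300 computes
 *	value = 1000 - ((T + 300) - T) = 700
 * (the CPU timer counts down) and retries through the seqcount if
 * kvm_s390_set_cpu_timer() or a start/stop of accounting raced with it.
 */
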
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003077void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3078{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003079
David Hildenbrand37d9df92015-03-11 16:47:33 +01003080 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003081 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01003082 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003083 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01003084 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003085}
3086
3087void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3088{
David Hildenbrand01a745a2016-02-12 20:41:56 +01003089 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01003090 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003091 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003092 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01003093 vcpu->arch.enabled_gmap = gmap_get_enabled();
3094 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003095
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003096}
3097
Dominik Dingel31928aa2014-12-04 15:47:07 +01003098void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003099{
Jason J. Herne72f25022014-11-25 09:46:02 -05003100 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02003101 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003102 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01003103 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02003104 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003105 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02003106 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01003107 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02003108 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02003109 }
David Hildenbrand6502a342016-06-21 14:19:51 +02003110 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3111 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01003112 /* make vcpu_load load the right gmap on the first trigger */
3113 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003114}
3115
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003116static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3117{
3118 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3119 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3120 return true;
3121 return false;
3122}
3123
3124static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3125{
3126 /* At least one ECC subfunction must be present */
3127 return kvm_has_pckmo_subfunc(kvm, 32) ||
3128 kvm_has_pckmo_subfunc(kvm, 33) ||
3129 kvm_has_pckmo_subfunc(kvm, 34) ||
3130 kvm_has_pckmo_subfunc(kvm, 40) ||
3131 kvm_has_pckmo_subfunc(kvm, 41);
3132
3133}
3134
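/*
 * The subfunction numbers tested above are the PCKMO function codes of
 * the ECC key types (0x20-0x22 for ECC-P256/P384/P521, 0x28/0x29 for
 * Ed25519/Ed448, assuming the usual CPACF code assignments). They are
 * probed with test_bit_inv() because the CPACF query masks number their
 * bits MSB first.
 */
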
Tony Krowiak5102ee82014-06-27 14:46:01 -04003135static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3136{
Tony Krowiake585b242018-09-25 19:16:18 -04003137 /*
3138 * If the AP instructions are not being interpreted and the MSAX3
3139 * facility is not configured for the guest, there is nothing to set up.
3140 */
3141 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04003142 return;
3143
Tony Krowiake585b242018-09-25 19:16:18 -04003144 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02003145 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04003146 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003147 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
Tony Krowiaka374e892014-09-03 10:13:53 +02003148
Tony Krowiake585b242018-09-25 19:16:18 -04003149 if (vcpu->kvm->arch.crypto.apie)
3150 vcpu->arch.sie_block->eca |= ECA_APIE;
3151
3152 /* Set up protected key support */
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003153 if (vcpu->kvm->arch.crypto.aes_kw) {
Tony Krowiaka374e892014-09-03 10:13:53 +02003154 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003155 /* ecc is also wrapped with AES key */
3156 if (kvm_has_pckmo_ecc(vcpu->kvm))
3157 vcpu->arch.sie_block->ecd |= ECD_ECC;
3158 }
3159
Tony Krowiaka374e892014-09-03 10:13:53 +02003160 if (vcpu->kvm->arch.crypto.dea_kw)
3161 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04003162}
3163
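/*
 * Illustrative userspace sketch (not part of this file): the aes_kw and
 * dea_kw bits consumed above are toggled through the vm device
 * attribute interface, e.g. (includes and error handling omitted):
 */
#if 0
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CRYPTO,
		.attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW,
	};

	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
#endif
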
Dominik Dingelb31605c2014-03-25 13:47:11 +01003164void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3165{
3166 free_page(vcpu->arch.sie_block->cbrlo);
3167 vcpu->arch.sie_block->cbrlo = 0;
3168}
3169
3170int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3171{
Christian Borntraegerc4196212020-11-06 08:34:23 +01003172 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL_ACCOUNT);
Dominik Dingelb31605c2014-03-25 13:47:11 +01003173 if (!vcpu->arch.sie_block->cbrlo)
3174 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01003175 return 0;
3176}
3177
Michael Mueller91520f12015-02-27 14:32:11 +01003178static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3179{
3180 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3181
Michael Mueller91520f12015-02-27 14:32:11 +01003182 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01003183 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01003184 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01003185}
3186
Sean Christophersonff72bb52019-12-18 13:55:20 -08003187static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3188{
Dominik Dingelb31605c2014-03-25 13:47:11 +01003189 int rc = 0;
Janosch Frank29b40f12019-09-30 04:19:18 -04003190 u16 uvrc, uvrrc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003191
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01003192 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3193 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003194 CPUSTAT_STOPPED);
3195
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003196 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003197 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003198 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003199 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003200
Michael Mueller91520f12015-02-27 14:32:11 +01003201 kvm_s390_vcpu_setup_model(vcpu);
3202
David Hildenbrandbdab09f2016-04-12 11:07:49 +02003203 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3204 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003205 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01003206 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003207 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02003208 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003209 vcpu->arch.sie_block->ecb |= ECB_TE;
Janis Schoetterl-Glausch7119dec2021-06-29 10:55:30 +02003210 if (!kvm_is_ucontrol(vcpu->kvm))
3211 vcpu->arch.sie_block->ecb |= ECB_SPECI;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003212
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003213 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003214 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02003215 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003216 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3217 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02003218 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003219 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02003220 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003221 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003222 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003223 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003224 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003225 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01003226 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003227 vcpu->arch.sie_block->eca |= ECA_VX;
3228 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04003229 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003230 if (test_kvm_facility(vcpu->kvm, 139))
3231 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003232 if (test_kvm_facility(vcpu->kvm, 156))
3233 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02003234 if (vcpu->arch.sie_block->gd) {
3235 vcpu->arch.sie_block->eca |= ECA_AIV;
3236 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3237 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3238 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003239 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3240 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08003241 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05003242
3243 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003244 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05003245 else
3246 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05003247
Dominik Dingele6db1d62015-05-07 15:41:57 +02003248 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01003249 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3250 if (rc)
3251 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003252 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01003253 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02003254 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01003255
Collin Walling67d49d52018-08-31 12:51:19 -04003256 vcpu->arch.sie_block->hpid = HPID_KVM;
3257
Tony Krowiak5102ee82014-06-27 14:46:01 -04003258 kvm_s390_vcpu_crypto_setup(vcpu);
3259
Janosch Frank29b40f12019-09-30 04:19:18 -04003260 mutex_lock(&vcpu->kvm->lock);
3261 if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3262 rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3263 if (rc)
3264 kvm_s390_vcpu_unsetup_cmma(vcpu);
3265 }
3266 mutex_unlock(&vcpu->kvm->lock);
3267
Dominik Dingelb31605c2014-03-25 13:47:11 +01003268 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003269}
3270
Sean Christopherson897cc382019-12-18 13:55:09 -08003271int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3272{
3273 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3274 return -EINVAL;
3275 return 0;
3276}
3277
Sean Christophersone529ef62019-12-18 13:55:15 -08003278int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003279{
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003280 struct sie_page *sie_page;
Sean Christopherson897cc382019-12-18 13:55:09 -08003281 int rc;
Carsten Otte4d475552011-10-18 12:27:12 +02003282
QingFeng Haoda72ca42017-06-07 11:41:19 +02003283 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Christian Borntraegerc4196212020-11-06 08:34:23 +01003284 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL_ACCOUNT);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003285 if (!sie_page)
Sean Christophersone529ef62019-12-18 13:55:15 -08003286 return -ENOMEM;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003287
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003288 vcpu->arch.sie_block = &sie_page->sie_block;
3289 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3290
David Hildenbrandefed1102015-04-16 12:32:41 +02003291 /* the real guest size will always be smaller than msl */
3292 vcpu->arch.sie_block->mso = 0;
3293 vcpu->arch.sie_block->msl = sclp.hamax;
3294
Sean Christophersone529ef62019-12-18 13:55:15 -08003295 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003296 spin_lock_init(&vcpu->arch.local_int.lock);
Sean Christophersone529ef62019-12-18 13:55:15 -08003297 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02003298 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3299 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003300 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01003301
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003302 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3303 kvm_clear_async_pf_completion_queue(vcpu);
3304 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3305 KVM_SYNC_GPRS |
3306 KVM_SYNC_ACRS |
3307 KVM_SYNC_CRS |
3308 KVM_SYNC_ARCH0 |
Collin Walling23a60f82020-06-22 11:46:36 -04003309 KVM_SYNC_PFAULT |
3310 KVM_SYNC_DIAG318;
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003311 kvm_s390_set_prefix(vcpu, 0);
3312 if (test_kvm_facility(vcpu->kvm, 64))
3313 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3314 if (test_kvm_facility(vcpu->kvm, 82))
3315 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3316 if (test_kvm_facility(vcpu->kvm, 133))
3317 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3318 if (test_kvm_facility(vcpu->kvm, 156))
3319 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3320 /* fprs can be synchronized via vrs, even if the guest has no vx. With
3321 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3322 */
3323 if (MACHINE_HAS_VX)
3324 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3325 else
3326 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3327
3328 if (kvm_is_ucontrol(vcpu->kvm)) {
3329 rc = __kvm_ucontrol_vcpu_init(vcpu);
3330 if (rc)
Sean Christophersona2017f12019-12-18 13:55:11 -08003331 goto out_free_sie_block;
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003332 }
3333
Sean Christophersone529ef62019-12-18 13:55:15 -08003334 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3335 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3336 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003337
Sean Christophersonff72bb52019-12-18 13:55:20 -08003338 rc = kvm_s390_vcpu_setup(vcpu);
3339 if (rc)
3340 goto out_ucontrol_uninit;
Sean Christophersone529ef62019-12-18 13:55:15 -08003341 return 0;
3342
Sean Christophersonff72bb52019-12-18 13:55:20 -08003343out_ucontrol_uninit:
3344 if (kvm_is_ucontrol(vcpu->kvm))
3345 gmap_remove(vcpu->arch.gmap);
Wei Yongjun7b06bf22010-03-09 14:37:53 +08003346out_free_sie_block:
3347 free_page((unsigned long)(vcpu->arch.sie_block));
Sean Christophersone529ef62019-12-18 13:55:15 -08003348 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003349}
3350
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003351int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3352{
Halil Pasic9b57e9d2021-10-19 19:53:59 +02003353 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
David Hildenbrand9a022062014-08-05 17:40:47 +02003354 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003355}
3356
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003357bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3358{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003359 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003360}
3361
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003362void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003363{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003364 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003365 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003366}
3367
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003368void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003369{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003370 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003371}
3372
Christian Borntraeger8e236542015-04-09 13:49:04 +02003373static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3374{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003375 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003376 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003377}
3378
David Hildenbrand9ea59722018-09-25 19:16:16 -04003379bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3380{
3381 return atomic_read(&vcpu->arch.sie_block->prog20) &
3382 (PROG_BLOCK_SIE | PROG_REQUEST);
3383}
3384
Christian Borntraeger8e236542015-04-09 13:49:04 +02003385static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3386{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003387 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003388}
3389
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003390/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003391 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003392 * If the CPU is not running (e.g. waiting as idle) the function will
3393 * return immediately. */
3394void exit_sie(struct kvm_vcpu *vcpu)
3395{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003396 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003397 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003398 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3399 cpu_relax();
3400}
3401
Christian Borntraeger8e236542015-04-09 13:49:04 +02003402/* Kick a guest cpu out of SIE to process a request synchronously */
3403void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003404{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003405 kvm_make_request(req, vcpu);
3406 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003407}
3408
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003409static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3410 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003411{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003412 struct kvm *kvm = gmap->private;
3413 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003414 unsigned long prefix;
Marc Zyngier46808a42021-11-16 16:04:02 +00003415 unsigned long i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003416
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02003417 if (gmap_is_shadow(gmap))
3418 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003419 if (start >= 1UL << 31)
3420 /* We are only interested in prefix pages */
3421 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003422 kvm_for_each_vcpu(i, vcpu, kvm) {
3423 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003424 prefix = kvm_s390_get_prefix(vcpu);
3425 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3426 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3427 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003428 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003429 }
3430 }
3431}
3432
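/*
 * Worked example for the interval check above: a vcpu with prefix
 * 0x8000 owns the two prefix pages 0x8000..0x9fff. A notifier range of
 * start = 0x9000, end = 0x9fff satisfies
 *	prefix <= end && start <= prefix + 2*PAGE_SIZE - 1
 * and therefore forces a KVM_REQ_MMU_RELOAD for that vcpu.
 */
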
Christian Borntraeger8b905d22019-03-05 05:30:02 -05003433bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3434{
3435	/* do not poll if steal time exceeds halt_poll_max_steal percent */
3436 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3437 halt_poll_max_steal) {
3438 vcpu->stat.halt_no_poll_steal++;
3439 return true;
3440 }
3441 return false;
3442}
3443
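/*
 * Worked arithmetic for the check above: avg_steal_timer is the average
 * steal per tick in TOD units and TICK_USEC << 12 is one tick expressed
 * in TOD units (4096 units per microsecond), so the quotient is the
 * steal percentage. With the default halt_poll_max_steal of 10, polling
 * is skipped once 10% or more of the CPU time was stolen.
 */
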
Christoffer Dallb6d33832012-03-08 16:44:24 -05003444int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3445{
3446 /* kvm common code refers to this, but never calls it */
3447 BUG();
3448 return 0;
3449}
3450
Carsten Otte14eebd92012-05-15 14:15:26 +02003451static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3452 struct kvm_one_reg *reg)
3453{
3454 int r = -EINVAL;
3455
3456 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003457 case KVM_REG_S390_TODPR:
3458 r = put_user(vcpu->arch.sie_block->todpr,
3459 (u32 __user *)reg->addr);
3460 break;
3461 case KVM_REG_S390_EPOCHDIFF:
3462 r = put_user(vcpu->arch.sie_block->epoch,
3463 (u64 __user *)reg->addr);
3464 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003465 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003466 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02003467 (u64 __user *)reg->addr);
3468 break;
3469 case KVM_REG_S390_CLOCK_COMP:
3470 r = put_user(vcpu->arch.sie_block->ckc,
3471 (u64 __user *)reg->addr);
3472 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003473 case KVM_REG_S390_PFTOKEN:
3474 r = put_user(vcpu->arch.pfault_token,
3475 (u64 __user *)reg->addr);
3476 break;
3477 case KVM_REG_S390_PFCOMPARE:
3478 r = put_user(vcpu->arch.pfault_compare,
3479 (u64 __user *)reg->addr);
3480 break;
3481 case KVM_REG_S390_PFSELECT:
3482 r = put_user(vcpu->arch.pfault_select,
3483 (u64 __user *)reg->addr);
3484 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003485 case KVM_REG_S390_PP:
3486 r = put_user(vcpu->arch.sie_block->pp,
3487 (u64 __user *)reg->addr);
3488 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003489 case KVM_REG_S390_GBEA:
3490 r = put_user(vcpu->arch.sie_block->gbea,
3491 (u64 __user *)reg->addr);
3492 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003493 default:
3494 break;
3495 }
3496
3497 return r;
3498}
3499
3500static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3501 struct kvm_one_reg *reg)
3502{
3503 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01003504 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02003505
3506 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003507 case KVM_REG_S390_TODPR:
3508 r = get_user(vcpu->arch.sie_block->todpr,
3509 (u32 __user *)reg->addr);
3510 break;
3511 case KVM_REG_S390_EPOCHDIFF:
3512 r = get_user(vcpu->arch.sie_block->epoch,
3513 (u64 __user *)reg->addr);
3514 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003515 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003516 r = get_user(val, (u64 __user *)reg->addr);
3517 if (!r)
3518 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02003519 break;
3520 case KVM_REG_S390_CLOCK_COMP:
3521 r = get_user(vcpu->arch.sie_block->ckc,
3522 (u64 __user *)reg->addr);
3523 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003524 case KVM_REG_S390_PFTOKEN:
3525 r = get_user(vcpu->arch.pfault_token,
3526 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003527 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3528 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02003529 break;
3530 case KVM_REG_S390_PFCOMPARE:
3531 r = get_user(vcpu->arch.pfault_compare,
3532 (u64 __user *)reg->addr);
3533 break;
3534 case KVM_REG_S390_PFSELECT:
3535 r = get_user(vcpu->arch.pfault_select,
3536 (u64 __user *)reg->addr);
3537 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003538 case KVM_REG_S390_PP:
3539 r = get_user(vcpu->arch.sie_block->pp,
3540 (u64 __user *)reg->addr);
3541 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003542 case KVM_REG_S390_GBEA:
3543 r = get_user(vcpu->arch.sie_block->gbea,
3544 (u64 __user *)reg->addr);
3545 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003546 default:
3547 break;
3548 }
3549
3550 return r;
3551}
Christoffer Dallb6d33832012-03-08 16:44:24 -05003552
Janosch Frank7de3f142020-01-31 05:02:02 -05003553static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003554{
Janosch Frank7de3f142020-01-31 05:02:02 -05003555 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
3556 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3557 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
3558
3559 kvm_clear_async_pf_completion_queue(vcpu);
3560 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
3561 kvm_s390_vcpu_stop(vcpu);
3562 kvm_s390_clear_local_irqs(vcpu);
3563}
3564
3565static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3566{
3567 /* Initial reset is a superset of the normal reset */
3568 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
3569
Christian Borntraegere93fc7b2020-03-03 03:10:57 -05003570 /*
3571 * This equals initial cpu reset in pop, but we don't switch to ESA.
3572 * We do not only reset the internal data, but also ...
3573 */
Janosch Frank7de3f142020-01-31 05:02:02 -05003574 vcpu->arch.sie_block->gpsw.mask = 0;
3575 vcpu->arch.sie_block->gpsw.addr = 0;
3576 kvm_s390_set_prefix(vcpu, 0);
3577 kvm_s390_set_cpu_timer(vcpu, 0);
3578 vcpu->arch.sie_block->ckc = 0;
Janosch Frank7de3f142020-01-31 05:02:02 -05003579 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
3580 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
3581 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
Christian Borntraegere93fc7b2020-03-03 03:10:57 -05003582
3583 /* ... the data in sync regs */
3584 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
3585 vcpu->run->s.regs.ckc = 0;
3586 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
3587 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
3588 vcpu->run->psw_addr = 0;
3589 vcpu->run->psw_mask = 0;
3590 vcpu->run->s.regs.todpr = 0;
3591 vcpu->run->s.regs.cputm = 0;
3592 vcpu->run->s.regs.ckc = 0;
3593 vcpu->run->s.regs.pp = 0;
3594 vcpu->run->s.regs.gbea = 1;
Janosch Frank7de3f142020-01-31 05:02:02 -05003595 vcpu->run->s.regs.fpc = 0;
Janosch Frank0f303502020-02-10 04:27:47 -05003596 /*
3597 * Do not reset these registers in the protected case, as some of
3598 * them are overlaid and they are not accessible in this case
3599 * anyway.
3600 */
3601 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
3602 vcpu->arch.sie_block->gbea = 1;
3603 vcpu->arch.sie_block->pp = 0;
3604 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3605 vcpu->arch.sie_block->todpr = 0;
3606 }
Janosch Frank7de3f142020-01-31 05:02:02 -05003607}
3608
3609static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
3610{
3611 struct kvm_sync_regs *regs = &vcpu->run->s.regs;
3612
3613 /* Clear reset is a superset of the initial reset */
3614 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3615
3616 memset(&regs->gprs, 0, sizeof(regs->gprs));
3617 memset(&regs->vrs, 0, sizeof(regs->vrs));
3618 memset(&regs->acrs, 0, sizeof(regs->acrs));
3619 memset(&regs->gscb, 0, sizeof(regs->gscb));
3620
3621 regs->etoken = 0;
3622 regs->etoken_extension = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003623}
3624
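/*
 * Illustrative userspace sketch (not part of this file): the three vcpu
 * resets implemented above form a strict superset hierarchy, normal <
 * initial < clear, and are driven by dataless vcpu ioctls. The first
 * and the last one are only offered with KVM_CAP_S390_VCPU_RESETS.
 */
#if 0
	ioctl(vcpu_fd, KVM_S390_NORMAL_RESET, 0);	/* normal reset */
	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);	/* + initial cpu reset */
	ioctl(vcpu_fd, KVM_S390_CLEAR_RESET, 0);	/* + register clearing */
#endif
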
3625int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3626{
Christoffer Dall875656f2017-12-04 21:35:27 +01003627 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003628 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Christoffer Dall875656f2017-12-04 21:35:27 +01003629 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003630 return 0;
3631}
3632
3633int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3634{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003635 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003636 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003637 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003638 return 0;
3639}
3640
3641int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3642 struct kvm_sregs *sregs)
3643{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003644 vcpu_load(vcpu);
3645
Christian Borntraeger59674c12012-01-11 11:20:33 +01003646 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003647 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003648
3649 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003650 return 0;
3651}
3652
3653int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3654 struct kvm_sregs *sregs)
3655{
Christoffer Dallbcdec412017-12-04 21:35:28 +01003656 vcpu_load(vcpu);
3657
Christian Borntraeger59674c12012-01-11 11:20:33 +01003658 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003659 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Christoffer Dallbcdec412017-12-04 21:35:28 +01003660
3661 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003662 return 0;
3663}
3664
3665int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3666{
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003667 int ret = 0;
3668
3669 vcpu_load(vcpu);
3670
3671 if (test_fp_ctl(fpu->fpc)) {
3672 ret = -EINVAL;
3673 goto out;
3674 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003675 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003676 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003677 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3678 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003679 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003680 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003681
3682out:
3683 vcpu_put(vcpu);
3684 return ret;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003685}
3686
3687int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3688{
Christoffer Dall13931232017-12-04 21:35:34 +01003689 vcpu_load(vcpu);
3690
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003691 /* make sure we have the latest values */
3692 save_fpu_regs();
3693 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003694 convert_vx_to_fp((freg_t *) fpu->fprs,
3695 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003696 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003697 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003698 fpu->fpc = vcpu->run->s.regs.fpc;
Christoffer Dall13931232017-12-04 21:35:34 +01003699
3700 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003701 return 0;
3702}
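/*
 * Background note, an addition to the original file: with the vector
 * facility, floating point register i architecturally occupies the
 * leftmost 64 bits of vector register i. That is all convert_fp_to_vx()
 * and convert_vx_to_fp() move, conceptually:
 *
 *	fprs[i] <- leftmost 64 bits of vrs[i]	(get_fpu path)
 *	leftmost 64 bits of vrs[i] <- fprs[i]	(set_fpu path)
 *
 * so the fpu ioctls stay usable whether or not the guest uses vectors.
 */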
3703
3704static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3705{
3706 int rc = 0;
3707
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003708 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003709 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003710 else {
3711 vcpu->run->psw_mask = psw.mask;
3712 vcpu->run->psw_addr = psw.addr;
3713 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003714 return rc;
3715}
3716
3717int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3718 struct kvm_translation *tr)
3719{
3720 return -EINVAL; /* not implemented yet */
3721}
3722
David Hildenbrand27291e22014-01-23 12:26:52 +01003723#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3724 KVM_GUESTDBG_USE_HW_BP | \
3725 KVM_GUESTDBG_ENABLE)
3726
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003727int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3728 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003729{
David Hildenbrand27291e22014-01-23 12:26:52 +01003730 int rc = 0;
3731
Christoffer Dall66b56562017-12-04 21:35:33 +01003732 vcpu_load(vcpu);
3733
David Hildenbrand27291e22014-01-23 12:26:52 +01003734 vcpu->guest_debug = 0;
3735 kvm_s390_clear_bp_data(vcpu);
3736
Christoffer Dall66b56562017-12-04 21:35:33 +01003737 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3738 rc = -EINVAL;
3739 goto out;
3740 }
3741 if (!sclp.has_gpere) {
3742 rc = -EINVAL;
3743 goto out;
3744 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003745
3746 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3747 vcpu->guest_debug = dbg->control;
3748 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003749 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003750
3751 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3752 rc = kvm_s390_import_bp_data(vcpu, dbg);
3753 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003754 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003755 vcpu->arch.guestdbg.last_bp = 0;
3756 }
3757
3758 if (rc) {
3759 vcpu->guest_debug = 0;
3760 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003761 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003762 }
3763
Christoffer Dall66b56562017-12-04 21:35:33 +01003764out:
3765 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003766 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003767}
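/*
 * Illustrative userspace sketch, not part of the original file: enabling
 * hardware breakpoints through the handler above. Flags are limited to
 * VALID_GUESTDBG_FLAGS; vcpu_fd and bp_array are hypothetical.
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
 *		.arch.nr_hw_bp = 1,
 *		.arch.hw_bp = bp_array,
 *	};
 *	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
 *		...;	/* e.g. -EINVAL without the SCLP GPERE facility */
 */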
3768
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003769int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3770 struct kvm_mp_state *mp_state)
3771{
Christoffer Dallfd232562017-12-04 21:35:30 +01003772 int ret;
3773
3774 vcpu_load(vcpu);
3775
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003776 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003777 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3778 KVM_MP_STATE_OPERATING;
3779
3780 vcpu_put(vcpu);
3781 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003782}
3783
3784int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3785 struct kvm_mp_state *mp_state)
3786{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003787 int rc = 0;
3788
Christoffer Dalle83dff52017-12-04 21:35:31 +01003789 vcpu_load(vcpu);
3790
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003791 /* user space knows about this interface - let it control the state */
Eric Farman67cf68b2021-10-08 22:31:12 +02003792 kvm_s390_set_user_cpu_state_ctrl(vcpu->kvm);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003793
3794 switch (mp_state->mp_state) {
3795 case KVM_MP_STATE_STOPPED:
Janosch Frankfe28c7862019-05-15 13:24:30 +02003796 rc = kvm_s390_vcpu_stop(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003797 break;
3798 case KVM_MP_STATE_OPERATING:
Janosch Frankfe28c7862019-05-15 13:24:30 +02003799 rc = kvm_s390_vcpu_start(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003800 break;
3801 case KVM_MP_STATE_LOAD:
Janosch Frank7c36a3f2019-09-02 08:34:44 +02003802 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
3803 rc = -ENXIO;
3804 break;
3805 }
3806 rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
3807 break;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003808 case KVM_MP_STATE_CHECK_STOP:
Joe Perches3b684a42020-03-10 21:51:32 -07003809 fallthrough; /* CHECK_STOP and LOAD are not supported yet */
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003810 default:
3811 rc = -ENXIO;
3812 }
3813
Christoffer Dalle83dff52017-12-04 21:35:31 +01003814 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003815 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003816}
3817
David Hildenbrand8ad35752014-03-14 11:00:21 +01003818static bool ibs_enabled(struct kvm_vcpu *vcpu)
3819{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003820 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003821}
3822
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003823static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3824{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003825retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003826 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003827 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003828 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003829 /*
3830 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003831 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003832 * This ensures that the ipte instruction for this request has
3833 * already finished. We might race against a second unmapper that
 3834	 * wants to set the blocking bit. Let's just retry the request loop.
3835 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003836 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003837 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003838 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3839 kvm_s390_get_prefix(vcpu),
3840 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003841 if (rc) {
3842 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003843 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003844 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003845 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003846 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003847
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003848 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3849 vcpu->arch.sie_block->ihcpu = 0xffff;
3850 goto retry;
3851 }
3852
David Hildenbrand8ad35752014-03-14 11:00:21 +01003853 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3854 if (!ibs_enabled(vcpu)) {
3855 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003856 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003857 }
3858 goto retry;
3859 }
3860
3861 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3862 if (ibs_enabled(vcpu)) {
3863 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003864 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003865 }
3866 goto retry;
3867 }
3868
David Hildenbrand6502a342016-06-21 14:19:51 +02003869 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3870 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3871 goto retry;
3872 }
3873
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003874 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3875 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003876 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003877 * instruction manually, in order to provide additional
 3878	 * functionality needed for live migration.
3879 */
3880 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3881 goto retry;
3882 }
3883
3884 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3885 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003886 * Re-enable CMM virtualization if CMMA is available and
3887 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003888 */
3889 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003890 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003891 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3892 goto retry;
3893 }
3894
David Hildenbrand0759d062014-05-13 16:54:32 +02003895 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003896 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003897 /* we left the vsie handler, nothing to do, just clear the request */
3898 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003899
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003900 return 0;
3901}
3902
David Hildenbrand0e7def52018-02-07 12:46:43 +01003903void kvm_s390_set_tod_clock(struct kvm *kvm,
3904 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003905{
3906 struct kvm_vcpu *vcpu;
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003907 union tod_clock clk;
Marc Zyngier46808a42021-11-16 16:04:02 +00003908 unsigned long i;
Collin L. Walling8fa16962016-07-26 15:29:44 -04003909
3910 mutex_lock(&kvm->lock);
3911 preempt_disable();
3912
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003913 store_tod_clock_ext(&clk);
Collin L. Walling8fa16962016-07-26 15:29:44 -04003914
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003915 kvm->arch.epoch = gtod->tod - clk.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003916 kvm->arch.epdx = 0;
3917 if (test_kvm_facility(kvm, 139)) {
Heiko Carstens2cfd7b72021-02-08 16:27:33 +01003918 kvm->arch.epdx = gtod->epoch_idx - clk.ei;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003919 if (kvm->arch.epoch > gtod->tod)
3920 kvm->arch.epdx -= 1;
3921 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003922
3923 kvm_s390_vcpu_block_all(kvm);
3924 kvm_for_each_vcpu(i, vcpu, kvm) {
3925 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3926 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3927 }
3928
3929 kvm_s390_vcpu_unblock_all(kvm);
3930 preempt_enable();
3931 mutex_unlock(&kvm->lock);
3932}
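/*
 * Worked example, an addition to the original file: SIE presents the
 * guest TOD as host_tod + epoch modulo 2^64, with epdx carrying the
 * multi-epoch extension. The wrapping subtraction above is intentional:
 *
 *	gtod->tod = 0x10, clk.tod = 0x20
 *	=> epoch = 0x10 - 0x20 = 0xfffffffffffffff0
 *	=> epoch > gtod->tod, so the borrow is propagated: epdx -= 1
 */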
3933
Thomas Huthfa576c52014-05-06 17:20:16 +02003934/**
3935 * kvm_arch_fault_in_page - fault-in guest page if necessary
3936 * @vcpu: The corresponding virtual cpu
3937 * @gpa: Guest physical address
3938 * @writable: Whether the page should be writable or not
3939 *
3940 * Make sure that a guest page has been faulted-in on the host.
3941 *
3942 * Return: Zero on success, negative error code otherwise.
3943 */
3944long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003945{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003946 return gmap_fault(vcpu->arch.gmap, gpa,
3947 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003948}
3949
Dominik Dingel3c038e62013-10-07 17:11:48 +02003950static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3951 unsigned long token)
3952{
3953 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003954 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003955
3956 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003957 irq.u.ext.ext_params2 = token;
3958 irq.type = KVM_S390_INT_PFAULT_INIT;
3959 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003960 } else {
3961 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003962 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003963 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3964 }
3965}
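/*
 * Background note, an addition to the original file: this implements the
 * two halves of the pfault handshake. PFAULT_INIT tells the guest that
 * the host is resolving a page fault asynchronously (the guest may then
 * schedule another task); PFAULT_DONE delivers the same token once the
 * page is available, so the guest can resume the waiting task.
 */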
3966
Vitaly Kuznetsov2a18b7e2020-06-10 19:55:32 +02003967bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
Dominik Dingel3c038e62013-10-07 17:11:48 +02003968 struct kvm_async_pf *work)
3969{
3970 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3971 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
Vitaly Kuznetsov2a18b7e2020-06-10 19:55:32 +02003972
3973 return true;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003974}
3975
3976void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3977 struct kvm_async_pf *work)
3978{
3979 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3980 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3981}
3982
3983void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3984 struct kvm_async_pf *work)
3985{
3986 /* s390 will always inject the page directly */
3987}
3988
Vitaly Kuznetsov7c0ade62020-05-25 16:41:18 +02003989bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
Dominik Dingel3c038e62013-10-07 17:11:48 +02003990{
3991 /*
3992 * s390 will always inject the page directly,
 3993	 * but we still want check_async_completion to clean up
3994 */
3995 return true;
3996}
3997
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02003998static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
Dominik Dingel3c038e62013-10-07 17:11:48 +02003999{
4000 hva_t hva;
4001 struct kvm_arch_async_pf arch;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004002
4003 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004004 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004005 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
4006 vcpu->arch.pfault_compare)
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004007 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004008 if (psw_extint_disabled(vcpu))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004009 return false;
David Hildenbrand9a022062014-08-05 17:40:47 +02004010 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004011 return false;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02004012 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004013 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004014 if (!vcpu->arch.gmap->pfault_enabled)
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004015 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004016
Heiko Carstens81480cc2014-01-01 16:36:07 +01004017 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
4018 hva += current->thread.gmap_addr & ~PAGE_MASK;
4019 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004020 return false;
Dominik Dingel3c038e62013-10-07 17:11:48 +02004021
Vitaly Kuznetsove8c22262020-06-15 14:13:34 +02004022 return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
Dominik Dingel3c038e62013-10-07 17:11:48 +02004023}
4024
Thomas Huth3fb4c402013-09-12 10:33:43 +02004025static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004026{
Thomas Huth3fb4c402013-09-12 10:33:43 +02004027 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01004028
Dominik Dingel3c038e62013-10-07 17:11:48 +02004029 /*
 4030	 * On s390, notifications for arriving pages will be delivered directly
 4031	 * to the guest, but the housekeeping for completed pfaults is
4032 * handled outside the worker.
4033 */
4034 kvm_check_async_pf_completion(vcpu);
4035
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01004036 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
4037 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004038
4039 if (need_resched())
4040 schedule();
4041
Jens Freimann79395032014-04-17 10:10:30 +02004042 if (!kvm_is_ucontrol(vcpu->kvm)) {
4043 rc = kvm_s390_deliver_pending_interrupts(vcpu);
4044 if (rc)
4045 return rc;
4046 }
Carsten Otte0ff31862008-05-21 13:37:37 +02004047
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02004048 rc = kvm_s390_handle_requests(vcpu);
4049 if (rc)
4050 return rc;
4051
David Hildenbrand27291e22014-01-23 12:26:52 +01004052 if (guestdbg_enabled(vcpu)) {
4053 kvm_s390_backup_guest_per_regs(vcpu);
4054 kvm_s390_patch_guest_per_regs(vcpu);
4055 }
4056
Sean Christopherson4eeef242021-09-10 11:32:19 -07004057 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
Michael Mueller9f30f622019-01-31 09:52:44 +01004058
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004059 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004060 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4061 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4062 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02004063
Thomas Huth3fb4c402013-09-12 10:33:43 +02004064 return 0;
4065}
4066
Thomas Huth492d8642015-02-10 16:11:01 +01004067static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
4068{
David Hildenbrand56317922016-01-12 17:37:58 +01004069 struct kvm_s390_pgm_info pgm_info = {
4070 .code = PGM_ADDRESSING,
4071 };
4072 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01004073 int rc;
4074
4075 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4076 trace_kvm_s390_sie_fault(vcpu);
4077
4078 /*
4079 * We want to inject an addressing exception, which is defined as a
4080 * suppressing or terminating exception. However, since we came here
4081 * by a DAT access exception, the PSW still points to the faulting
4082 * instruction since DAT exceptions are nullifying. So we've got
4083 * to look up the current opcode to get the length of the instruction
4084 * to be able to forward the PSW.
4085 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02004086 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01004087 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01004088 if (rc < 0) {
4089 return rc;
4090 } else if (rc) {
4091 /* Instruction-Fetching Exceptions - we can't detect the ilen.
4092 * Forward by arbitrary ilc, injection will take care of
4093 * nullification if necessary.
4094 */
4095 pgm_info = vcpu->arch.pgm;
4096 ilen = 4;
4097 }
David Hildenbrand56317922016-01-12 17:37:58 +01004098 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4099 kvm_s390_forward_psw(vcpu, ilen);
4100 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01004101}
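/*
 * Background note, an addition to the original file: on s390 the two
 * leftmost bits of the first opcode byte encode the instruction length,
 * which is what insn_length() decodes:
 *
 *	00       -> 2 bytes
 *	01 or 10 -> 4 bytes
 *	11       -> 6 bytes
 *
 * Reading one opcode byte above is therefore enough to forward the PSW
 * past the faulting instruction.
 */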
4102
Thomas Huth3fb4c402013-09-12 10:33:43 +02004103static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4104{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02004105 struct mcck_volatile_info *mcck_info;
4106 struct sie_page *sie_page;
4107
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02004108 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4109 vcpu->arch.sie_block->icptcode);
4110 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4111
David Hildenbrand27291e22014-01-23 12:26:52 +01004112 if (guestdbg_enabled(vcpu))
4113 kvm_s390_restore_guest_per_regs(vcpu);
4114
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01004115 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4116 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004117
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02004118 if (exit_reason == -EINTR) {
4119 VCPU_EVENT(vcpu, 3, "%s", "machine check");
4120 sie_page = container_of(vcpu->arch.sie_block,
4121 struct sie_page, sie_block);
4122 mcck_info = &sie_page->mcck_info;
4123 kvm_s390_reinject_machine_check(vcpu, mcck_info);
4124 return 0;
4125 }
4126
David Hildenbrand71f116b2015-10-19 16:24:28 +02004127 if (vcpu->arch.sie_block->icptcode > 0) {
4128 int rc = kvm_handle_sie_intercept(vcpu);
4129
4130 if (rc != -EOPNOTSUPP)
4131 return rc;
4132 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4133 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4134 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4135 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4136 return -EREMOTE;
4137 } else if (exit_reason != -EFAULT) {
4138 vcpu->stat.exit_null++;
4139 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02004140 } else if (kvm_is_ucontrol(vcpu->kvm)) {
4141 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4142 vcpu->run->s390_ucontrol.trans_exc_code =
4143 current->thread.gmap_addr;
4144 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004145 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004146 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02004147 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004148 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004149 if (kvm_arch_setup_async_pf(vcpu))
4150 return 0;
Christian Borntraeger50a05be2020-11-25 10:06:58 +01004151 vcpu->stat.pfault_sync++;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004152 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004153 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02004154 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004155}
4156
Janosch Frank3adae0b2019-12-13 08:26:06 -05004157#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
Thomas Huth3fb4c402013-09-12 10:33:43 +02004158static int __vcpu_run(struct kvm_vcpu *vcpu)
4159{
4160 int rc, exit_reason;
Janosch Frankc8aac232019-05-08 15:52:00 +02004161 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004162
Thomas Huth800c1062013-09-12 10:33:45 +02004163 /*
4164 * We try to hold kvm->srcu during most of vcpu_run (except when run-
4165 * ning the guest), so that memslots (and other stuff) are protected
4166 */
4167 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4168
Thomas Hutha76ccff2013-09-12 10:33:44 +02004169 do {
4170 rc = vcpu_pre_run(vcpu);
4171 if (rc)
4172 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004173
Thomas Huth800c1062013-09-12 10:33:45 +02004174 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02004175 /*
4176 * As PF_VCPU will be used in fault handler, between
4177 * guest_enter and guest_exit should be no uaccess.
4178 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02004179 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02004180 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01004181 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02004182 local_irq_enable();
Janosch Frankc8aac232019-05-08 15:52:00 +02004183 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4184 memcpy(sie_page->pv_grregs,
4185 vcpu->run->s.regs.gprs,
4186 sizeof(sie_page->pv_grregs));
4187 }
Sven Schnelle56e62a72020-11-21 11:14:56 +01004188 if (test_cpu_flag(CIF_FPU))
4189 load_fpu_regs();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004190 exit_reason = sie64a(vcpu->arch.sie_block,
4191 vcpu->run->s.regs.gprs);
Janosch Frankc8aac232019-05-08 15:52:00 +02004192 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4193 memcpy(vcpu->run->s.regs.gprs,
4194 sie_page->pv_grregs,
4195 sizeof(sie_page->pv_grregs));
Janosch Frank3adae0b2019-12-13 08:26:06 -05004196 /*
4197 * We're not allowed to inject interrupts on intercepts
4198 * that leave the guest state in an "in-between" state
4199 * where the next SIE entry will do a continuation.
4200 * Fence interrupts in our "internal" PSW.
4201 */
4202 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4203 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4204 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4205 }
Janosch Frankc8aac232019-05-08 15:52:00 +02004206 }
Christian Borntraeger0097d122015-04-30 13:43:30 +02004207 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01004208 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02004209 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02004210 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02004211 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004212
Thomas Hutha76ccff2013-09-12 10:33:44 +02004213 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01004214 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004215
Thomas Huth800c1062013-09-12 10:33:45 +02004216 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01004217 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004218}
4219
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004220static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
David Hildenbrandb028ee32014-07-17 10:47:43 +02004221{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004222 struct kvm_run *kvm_run = vcpu->run;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004223 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004224 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004225
4226 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004227 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02004228 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4229 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
David Hildenbrandb028ee32014-07-17 10:47:43 +02004230 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrandb028ee32014-07-17 10:47:43 +02004231 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4232 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4233 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4234 }
4235 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4236 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4237 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4238 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02004239 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4240 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004241 }
Collin Walling23a60f82020-06-22 11:46:36 -04004242 if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
4243 vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
4244 vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
Collin Walling3fd84172021-10-26 22:54:51 -04004245 VCPU_EVENT(vcpu, 3, "setting cpnc to %d", vcpu->arch.diag318_info.cpnc);
Collin Walling23a60f82020-06-22 11:46:36 -04004246 }
Fan Zhang80cd8762016-08-15 04:53:22 +02004247 /*
4248 * If userspace sets the riccb (e.g. after migration) to a valid state,
4249 * we should enable RI here instead of doing the lazy enablement.
4250 */
4251 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004252 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02004253 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01004254 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004255 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01004256 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02004257 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004258 /*
4259 * If userspace sets the gscb (e.g. after migration) to non-zero,
4260 * we should enable GS here instead of doing the lazy enablement.
4261 */
4262 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
4263 test_kvm_facility(vcpu->kvm, 133) &&
4264 gscb->gssm &&
4265 !vcpu->arch.gs_enabled) {
4266 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
4267 vcpu->arch.sie_block->ecb |= ECB_GS;
4268 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4269 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02004270 }
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01004271 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
4272 test_kvm_facility(vcpu->kvm, 82)) {
4273 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4274 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4275 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004276 if (MACHINE_HAS_GS) {
4277 preempt_disable();
4278 __ctl_set_bit(2, 4);
4279 if (current->thread.gs_cb) {
4280 vcpu->arch.host_gscb = current->thread.gs_cb;
4281 save_gs_cb(vcpu->arch.host_gscb);
4282 }
4283 if (vcpu->arch.gs_enabled) {
4284 current->thread.gs_cb = (struct gs_cb *)
4285 &vcpu->run->s.regs.gscb;
4286 restore_gs_cb(current->thread.gs_cb);
4287 }
4288 preempt_enable();
4289 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004290 /* SIE will load etoken directly from SDNX and therefore kvm_run */
Janosch Frank811ea792019-06-14 13:11:21 +02004291}
4292
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004293static void sync_regs(struct kvm_vcpu *vcpu)
Janosch Frank811ea792019-06-14 13:11:21 +02004294{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004295 struct kvm_run *kvm_run = vcpu->run;
4296
Janosch Frank811ea792019-06-14 13:11:21 +02004297 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4298 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4299 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4300 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4301 /* some control register changes require a tlb flush */
4302 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4303 }
4304 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4305 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4306 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4307 }
4308 save_access_regs(vcpu->arch.host_acrs);
4309 restore_access_regs(vcpu->run->s.regs.acrs);
4310 /* save host (userspace) fprs/vrs */
4311 save_fpu_regs();
4312 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4313 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4314 if (MACHINE_HAS_VX)
4315 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
4316 else
4317 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
4318 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
4319 if (test_fp_ctl(current->thread.fpu.fpc))
4320 /* User space provided an invalid FPC, let's clear it */
4321 current->thread.fpu.fpc = 0;
4322
4323 /* Sync fmt2 only data */
4324 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004325 sync_regs_fmt2(vcpu);
Janosch Frank811ea792019-06-14 13:11:21 +02004326 } else {
4327 /*
4328 * In several places we have to modify our internal view to
4329 * not do things that are disallowed by the ultravisor. For
4330 * example we must not inject interrupts after specific exits
4331 * (e.g. 112 prefix page not secure). We do this by turning
4332 * off the machine check, external and I/O interrupt bits
4333 * of our PSW copy. To avoid getting validity intercepts, we
 4334	 * only accept the condition code from userspace.
4335 */
4336 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4337 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4338 PSW_MASK_CC;
4339 }
Fan Zhang80cd8762016-08-15 04:53:22 +02004340
David Hildenbrandb028ee32014-07-17 10:47:43 +02004341 kvm_run->kvm_dirty_regs = 0;
4342}
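/*
 * Illustrative sketch, not part of the original file: sync_regs() and
 * store_regs() implement the kvm_run register synchronization protocol.
 * Userspace flags what it changed before KVM_RUN and reads the results
 * afterwards, roughly (vcpu_fd and run are hypothetical):
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;	/* consumed by sync_regs() */
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	psw_mask = run->psw_mask;		/* filled in by store_regs() */
 */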
4343
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004344static void store_regs_fmt2(struct kvm_vcpu *vcpu)
David Hildenbrandb028ee32014-07-17 10:47:43 +02004345{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004346 struct kvm_run *kvm_run = vcpu->run;
4347
David Hildenbrandb028ee32014-07-17 10:47:43 +02004348 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4349 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4350 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01004351 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Collin Walling23a60f82020-06-22 11:46:36 -04004352 kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004353 if (MACHINE_HAS_GS) {
Heiko Carstens44bada22021-04-15 10:01:27 +02004354 preempt_disable();
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004355 __ctl_set_bit(2, 4);
4356 if (vcpu->arch.gs_enabled)
4357 save_gs_cb(current->thread.gs_cb);
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004358 current->thread.gs_cb = vcpu->arch.host_gscb;
4359 restore_gs_cb(vcpu->arch.host_gscb);
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004360 if (!vcpu->arch.host_gscb)
4361 __ctl_clear_bit(2, 4);
4362 vcpu->arch.host_gscb = NULL;
Heiko Carstens44bada22021-04-15 10:01:27 +02004363 preempt_enable();
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004364 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004365 /* SIE will save etoken directly into SDNX and therefore kvm_run */
David Hildenbrandb028ee32014-07-17 10:47:43 +02004366}
4367
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004368static void store_regs(struct kvm_vcpu *vcpu)
Janosch Frank811ea792019-06-14 13:11:21 +02004369{
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004370 struct kvm_run *kvm_run = vcpu->run;
4371
Janosch Frank811ea792019-06-14 13:11:21 +02004372 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4373 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4374 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
4375 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4376 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
4377 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4378 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4379 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4380 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
4381 save_access_regs(vcpu->run->s.regs.acrs);
4382 restore_access_regs(vcpu->arch.host_acrs);
4383 /* Save guest register state */
4384 save_fpu_regs();
4385 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4386 /* Restore will be done lazily at return */
4387 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
4388 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
4389 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004390 store_regs_fmt2(vcpu);
Janosch Frank811ea792019-06-14 13:11:21 +02004391}
4392
Tianjia Zhang1b94f6f2020-04-16 13:10:57 +08004393int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004394{
Tianjia Zhang1b94f6f2020-04-16 13:10:57 +08004395 struct kvm_run *kvm_run = vcpu->run;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004396 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004397
Paolo Bonzini460df4c2017-02-08 11:50:15 +01004398 if (kvm_run->immediate_exit)
4399 return -EINTR;
4400
Thomas Huth200824f2019-09-04 10:51:59 +02004401 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4402 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4403 return -EINVAL;
4404
Christoffer Dallaccb7572017-12-04 21:35:25 +01004405 vcpu_load(vcpu);
4406
David Hildenbrand27291e22014-01-23 12:26:52 +01004407 if (guestdbg_exit_pending(vcpu)) {
4408 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004409 rc = 0;
4410 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01004411 }
4412
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004413 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004414
Janosch Frankfe28c7862019-05-15 13:24:30 +02004415 /*
 4416	 * No need to check the return value of vcpu_start: it can only fail for
 4417	 * protvirt, and protvirt implies user-controlled cpu state anyway.
4418 */
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004419 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4420 kvm_s390_vcpu_start(vcpu);
4421 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004422 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004423 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004424 rc = -EINVAL;
4425 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004426 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004427
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004428 sync_regs(vcpu);
David Hildenbranddb0758b2016-02-15 09:42:25 +01004429 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004430
Heiko Carstensdab4079d2009-06-12 10:26:32 +02004431 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004432 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02004433
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004434 if (signal_pending(current) && !rc) {
4435 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004436 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004437 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004438
David Hildenbrand27291e22014-01-23 12:26:52 +01004439 if (guestdbg_exit_pending(vcpu) && !rc) {
4440 kvm_s390_prepare_debug_exit(vcpu);
4441 rc = 0;
4442 }
4443
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004444 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02004445 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004446 rc = 0;
4447 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004448
David Hildenbranddb0758b2016-02-15 09:42:25 +01004449 disable_cpu_timer_accounting(vcpu);
Tianjia Zhang2f0a83b2020-06-23 21:14:14 +08004450 store_regs(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004451
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004452 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004453
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004454 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01004455out:
4456 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02004457 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004458}
4459
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004460/*
4461 * store status at address
 4462 * we have two special cases:
4463 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4464 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4465 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01004466int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004467{
Carsten Otte092670c2011-07-24 10:48:22 +02004468 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004469 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02004470 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01004471 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004472 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004473
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004474 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01004475 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4476 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004477 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004478 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004479 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4480 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004481 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004482 gpa = px;
4483 } else
4484 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004485
4486 /* manually convert vector registers if necessary */
4487 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01004488 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004489 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4490 fprs, 128);
4491 } else {
4492 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01004493 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004494 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004495 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004496 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004497 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004498 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004499 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02004500 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004501 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004502 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004503 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004504 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01004505 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004506 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01004507 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01004508 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004509 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004510 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004511 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004512 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004513 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004514 &vcpu->arch.sie_block->gcr, 128);
4515 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004516}
4517
Thomas Huthe8798922013-11-06 15:46:33 +01004518int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4519{
4520 /*
4521 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01004522 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01004523	 * them into the save area.
4524 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02004525 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004526 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01004527 save_access_regs(vcpu->run->s.regs.acrs);
4528
4529 return kvm_s390_store_status_unloaded(vcpu, addr);
4530}
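/*
 * Illustrative sketch, not part of the original file: userspace reaches
 * the store-status code above through the KVM_S390_STORE_STATUS vcpu
 * ioctl, passing a guest absolute address or one of the two special
 * values handled in kvm_s390_store_status_unloaded():
 *
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR);
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_PREFIXED);
 */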
4531
David Hildenbrand8ad35752014-03-14 11:00:21 +01004532static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4533{
4534 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004535 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004536}
4537
4538static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
4539{
Marc Zyngier46808a42021-11-16 16:04:02 +00004540 unsigned long i;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004541 struct kvm_vcpu *vcpu;
4542
4543 kvm_for_each_vcpu(i, vcpu, kvm) {
4544 __disable_ibs_on_vcpu(vcpu);
4545 }
4546}
4547
4548static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4549{
David Hildenbrand09a400e2016-04-04 15:57:08 +02004550 if (!sclp.has_ibs)
4551 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004552 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004553 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004554}
4555
Janosch Frankfe28c7862019-05-15 13:24:30 +02004556int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004557{
Janosch Frankfe28c7862019-05-15 13:24:30 +02004558 int i, online_vcpus, r = 0, started_vcpus = 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004559
4560 if (!is_vcpu_stopped(vcpu))
Janosch Frankfe28c7862019-05-15 13:24:30 +02004561 return 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004562
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004563 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004564 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004565 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004566 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4567
Janosch Frankfe28c7862019-05-15 13:24:30 +02004568 /* Let's tell the UV that we want to change into the operating state */
4569 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4570 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
4571 if (r) {
4572 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4573 return r;
4574 }
4575 }
4576
David Hildenbrand8ad35752014-03-14 11:00:21 +01004577 for (i = 0; i < online_vcpus; i++) {
Marc Zyngier113d10b2021-11-16 16:03:59 +00004578 if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
David Hildenbrand8ad35752014-03-14 11:00:21 +01004579 started_vcpus++;
4580 }
4581
4582 if (started_vcpus == 0) {
4583 /* we're the only active VCPU -> speed it up */
4584 __enable_ibs_on_vcpu(vcpu);
4585 } else if (started_vcpus == 1) {
4586 /*
4587 * As we are starting a second VCPU, we have to disable
4588 * the IBS facility on all VCPUs to remove potentially
Bhaskar Chowdhury38860752021-02-13 21:02:27 +05304589 * outstanding ENABLE requests.
David Hildenbrand8ad35752014-03-14 11:00:21 +01004590 */
4591 __disable_ibs_on_all_vcpus(vcpu->kvm);
4592 }
4593
David Hildenbrand9daecfc2018-01-23 18:05:30 +01004594 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004595 /*
Christian Borntraeger72f21822020-01-30 11:18:28 -05004596 * The real PSW might have changed due to a RESTART interpreted by the
4597 * ultravisor. We block all interrupts and let the next sie exit
4598 * refresh our view.
4599 */
4600 if (kvm_s390_pv_cpu_is_protected(vcpu))
4601 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4602 /*
David Hildenbrand8ad35752014-03-14 11:00:21 +01004603 * Another VCPU might have used IBS while we were offline.
4604 * Let's play safe and flush the VCPU at startup.
4605 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02004606 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004607 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
Janosch Frankfe28c7862019-05-15 13:24:30 +02004608 return 0;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004609}
4610
Janosch Frankfe28c7862019-05-15 13:24:30 +02004611int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004612{
Janosch Frankfe28c7862019-05-15 13:24:30 +02004613 int i, online_vcpus, r = 0, started_vcpus = 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004614 struct kvm_vcpu *started_vcpu = NULL;
4615
4616 if (is_vcpu_stopped(vcpu))
Janosch Frankfe28c7862019-05-15 13:24:30 +02004617 return 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004618
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004619 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004620 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004621 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004622 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4623
Janosch Frankfe28c7862019-05-15 13:24:30 +02004624 /* Let's tell the UV that we want to change into the stopped state */
4625 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4626 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
4627 if (r) {
4628 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4629 return r;
4630 }
4631 }
4632
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004633 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02004634 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004635
David Hildenbrandef8f4f42018-01-23 18:05:29 +01004636 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004637 __disable_ibs_on_vcpu(vcpu);
4638
4639 for (i = 0; i < online_vcpus; i++) {
Marc Zyngier113d10b2021-11-16 16:03:59 +00004640 struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
4641
4642 if (!is_vcpu_stopped(tmp)) {
David Hildenbrand8ad35752014-03-14 11:00:21 +01004643 started_vcpus++;
Marc Zyngier113d10b2021-11-16 16:03:59 +00004644 started_vcpu = tmp;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004645 }
4646 }
4647
4648 if (started_vcpus == 1) {
4649 /*
4650 * As we only have one VCPU left, we want to enable the
4651 * IBS facility for that VCPU to speed it up.
4652 */
4653 __enable_ibs_on_vcpu(started_vcpu);
4654 }
4655
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004656 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
Janosch Frankfe28c7862019-05-15 13:24:30 +02004657 return 0;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004658}
4659
Cornelia Huckd6712df2012-12-20 15:32:11 +01004660static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4661 struct kvm_enable_cap *cap)
4662{
4663 int r;
4664
4665 if (cap->flags)
4666 return -EINVAL;
4667
4668 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004669 case KVM_CAP_S390_CSS_SUPPORT:
4670 if (!vcpu->kvm->arch.css_support) {
4671 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02004672 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004673 trace_kvm_s390_enable_css(vcpu->kvm);
4674 }
4675 r = 0;
4676 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01004677 default:
4678 r = -EINVAL;
4679 break;
4680 }
4681 return r;
4682}
4683
Janosch Frank19e12272019-04-02 09:21:06 +02004684static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
4685 struct kvm_s390_mem_op *mop)
4686{
4687 void __user *uaddr = (void __user *)mop->buf;
4688 int r = 0;
4689
4690 if (mop->flags || !mop->size)
4691 return -EINVAL;
4692 if (mop->size + mop->sida_offset < mop->size)
4693 return -EINVAL;
4694 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
4695 return -E2BIG;
4696
4697 switch (mop->op) {
4698 case KVM_S390_MEMOP_SIDA_READ:
4699 if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
4700 mop->sida_offset), mop->size))
4701 r = -EFAULT;
4702
4703 break;
4704 case KVM_S390_MEMOP_SIDA_WRITE:
4705 if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
4706 mop->sida_offset), uaddr, mop->size))
4707 r = -EFAULT;
4708 break;
4709 }
4710 return r;
4711}
Thomas Huth41408c282015-02-06 15:01:21 +01004712static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
4713 struct kvm_s390_mem_op *mop)
4714{
4715 void __user *uaddr = (void __user *)mop->buf;
4716 void *tmpbuf = NULL;
Janosch Frank19e12272019-04-02 09:21:06 +02004717 int r = 0;
Thomas Huth41408c282015-02-06 15:01:21 +01004718 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
4719 | KVM_S390_MEMOP_F_CHECK_ONLY;
4720
Thomas Hutha13b03b2019-08-29 14:25:17 +02004721 if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
Thomas Huth41408c282015-02-06 15:01:21 +01004722 return -EINVAL;
4723
4724 if (mop->size > MEM_OP_MAX_SIZE)
4725 return -E2BIG;
4726
Janosch Frank19e12272019-04-02 09:21:06 +02004727 if (kvm_s390_pv_cpu_is_protected(vcpu))
4728 return -EINVAL;
4729
Thomas Huth41408c282015-02-06 15:01:21 +01004730 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
4731 tmpbuf = vmalloc(mop->size);
4732 if (!tmpbuf)
4733 return -ENOMEM;
4734 }
4735
Thomas Huth41408c282015-02-06 15:01:21 +01004736 switch (mop->op) {
4737 case KVM_S390_MEMOP_LOGICAL_READ:
4738 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004739 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4740 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01004741 break;
4742 }
4743 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4744 if (r == 0) {
4745 if (copy_to_user(uaddr, tmpbuf, mop->size))
4746 r = -EFAULT;
4747 }
4748 break;
4749 case KVM_S390_MEMOP_LOGICAL_WRITE:
4750 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004751 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4752 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01004753 break;
4754 }
4755 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
4756 r = -EFAULT;
4757 break;
4758 }
4759 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4760 break;
Thomas Huth41408c282015-02-06 15:01:21 +01004761 }
4762
Thomas Huth41408c282015-02-06 15:01:21 +01004763 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
4764 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
4765
4766 vfree(tmpbuf);
4767 return r;
4768}
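/*
 * Illustrative sketch, not part of the original file: a minimal logical
 * read through the mem_op path above; vcpu_fd and buf are hypothetical.
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x10000,	/* guest logical address */
 *		.size  = 512,		/* bounded by MEM_OP_MAX_SIZE */
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buf,
 *		.ar    = 0,		/* access register number, < NUM_ACRS */
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);	/* 0 on success */
 */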
4769
static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu,
				      struct kvm_s390_mem_op *mop)
{
	int r, srcu_idx;

	/* the srcu read lock keeps the memslots stable during the operation */
	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		r = kvm_s390_guest_mem_op(vcpu, mop);
		break;
	case KVM_S390_MEMOP_SIDA_READ:
	case KVM_S390_MEMOP_SIDA_WRITE:
		/* we are locked against sida going away by the vcpu->mutex */
		r = kvm_s390_guest_sida_op(vcpu, mop);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	return r;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

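/*
 * Example (illustrative sketch, not compiled): injecting a restart
 * interrupt into a vcpu through the KVM_S390_IRQ ioctl handled above.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int inject_restart(int vcpu_fd)
{
	struct kvm_s390_irq irq;

	memset(&irq, 0, sizeof(irq));
	irq.type = KVM_S390_RESTART;	/* restart carries no extra payload */
	return ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
}
#endif
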
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;
	u16 rc, rrc;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_store_status_unloaded(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_INITIAL,
					  &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
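	/*
	 * The two UCAS ioctls below apply only to user-controlled
	 * ("ucontrol") VMs, where userspace manages the guest address
	 * space itself; kvm_is_ucontrol() enforces this.
	 */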
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_memsida_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

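/*
 * Example (illustrative sketch, not compiled): reading a single register via
 * KVM_GET_ONE_REG as handled above; protected vcpus reject this with
 * -EINVAL. KVM_REG_S390_CPU_TIMER serves as the register id here.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_cpu_timer(int vcpu_fd, __u64 *val)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_S390_CPU_TIMER,
		.addr = (__u64)(unsigned long)val,	/* filled on success */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
#endif
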
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

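/*
 * Example (illustrative sketch, not compiled): how the userspace of a
 * user-controlled VM could map the vcpu's SIE control block, which the
 * fault handler above backs with the sie_block page.
 */
#if 0
#include <sys/mman.h>
#include <linux/kvm.h>

static void *map_sie_block(int vcpu_fd, long page_size)
{
	/* page offset KVM_S390_SIE_PAGE_OFFSET selects the sie_block page */
	return mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * page_size);
}
#endif
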
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	gpa_t size = new->npages * PAGE_SIZE;

	/*
	 * A few sanity checks: memory slots have to start and end on a
	 * segment boundary (1 MB). The userland memory may be fragmented
	 * across several vmas, and it is fine to mmap() and munmap()
	 * within this slot at any time after this call.
	 */

	if (new->userspace_addr & 0xffffful)
		return -EINVAL;

	if (size & 0xffffful)
		return -EINVAL;

	if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
		return -EINVAL;

	/* When we are protected, we should not change the memory slots */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;
	return 0;
}

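/*
 * Example (illustrative sketch, not compiled): registering a memory slot
 * that passes the segment alignment checks above. The backing mapping and
 * guest base address are assumptions of the sketch.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define SEG_SIZE (1UL << 20)	/* 1 MB segment */

static int add_aligned_slot(int vm_fd, void *backing, __u64 guest_base,
			    __u64 size)
{
	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.flags = 0,
		.guest_phys_addr = guest_base,
		.memory_size = size,	/* must be a multiple of SEG_SIZE */
		/* the backing mapping must also be 1 MB aligned */
		.userspace_addr = (__u64)(unsigned long)backing,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
#endif
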
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		fallthrough;
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
				      new->base_gfn * PAGE_SIZE,
				      new->npages * PAGE_SIZE);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

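/*
 * Worked example for nonhyp_mask(): each facility doubleword i has a
 * two-bit field in sclp.hmfai, taken from the most significant end. A
 * field value of n shifts the 48-bit base mask right by 16 * n; for a
 * field value of 1 this yields 0x0000ffffffffffffUL >> 16 ==
 * 0x00000000ffffffff, i.e. one further 16-bit block of facility bits is
 * reserved to the hypervisor and masked out in kvm_s390_init() below.
 */
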
void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
5119MODULE_ALIAS("devname:kvm");