// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("userspace_handled", exit_userspace),
	VCPU_STAT("exit_null", exit_null),
	VCPU_STAT("exit_validity", exit_validity),
	VCPU_STAT("exit_stop_request", exit_stop_request),
	VCPU_STAT("exit_external_request", exit_external_request),
	VCPU_STAT("exit_io_request", exit_io_request),
	VCPU_STAT("exit_external_interrupt", exit_external_interrupt),
	VCPU_STAT("exit_instruction", exit_instruction),
	VCPU_STAT("exit_pei", exit_pei),
	VCPU_STAT("exit_program_interruption", exit_program_interruption),
	VCPU_STAT("exit_instr_and_program_int", exit_instr_and_program),
	VCPU_STAT("exit_operation_exception", exit_operation_exception),
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_no_poll_steal", halt_no_poll_steal),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	VCPU_STAT("instruction_lctlg", instruction_lctlg),
	VCPU_STAT("instruction_lctl", instruction_lctl),
	VCPU_STAT("instruction_stctl", instruction_stctl),
	VCPU_STAT("instruction_stctg", instruction_stctg),
	VCPU_STAT("deliver_ckc", deliver_ckc),
	VCPU_STAT("deliver_cputm", deliver_cputm),
	VCPU_STAT("deliver_emergency_signal", deliver_emergency_signal),
	VCPU_STAT("deliver_external_call", deliver_external_call),
	VCPU_STAT("deliver_service_signal", deliver_service_signal),
	VCPU_STAT("deliver_virtio", deliver_virtio),
	VCPU_STAT("deliver_stop_signal", deliver_stop_signal),
	VCPU_STAT("deliver_prefix_signal", deliver_prefix_signal),
	VCPU_STAT("deliver_restart_signal", deliver_restart_signal),
	VCPU_STAT("deliver_program", deliver_program),
	VCPU_STAT("deliver_io", deliver_io),
	VCPU_STAT("deliver_machine_check", deliver_machine_check),
	VCPU_STAT("exit_wait_state", exit_wait_state),
	VCPU_STAT("inject_ckc", inject_ckc),
	VCPU_STAT("inject_cputm", inject_cputm),
	VCPU_STAT("inject_external_call", inject_external_call),
	VM_STAT("inject_float_mchk", inject_float_mchk),
	VCPU_STAT("inject_emergency_signal", inject_emergency_signal),
	VM_STAT("inject_io", inject_io),
	VCPU_STAT("inject_mchk", inject_mchk),
	VM_STAT("inject_pfault_done", inject_pfault_done),
	VCPU_STAT("inject_program", inject_program),
	VCPU_STAT("inject_restart", inject_restart),
	VM_STAT("inject_service_signal", inject_service_signal),
	VCPU_STAT("inject_set_prefix", inject_set_prefix),
	VCPU_STAT("inject_stop_signal", inject_stop_signal),
	VCPU_STAT("inject_pfault_init", inject_pfault_init),
	VM_STAT("inject_virtio", inject_virtio),
	VCPU_STAT("instruction_epsw", instruction_epsw),
	VCPU_STAT("instruction_gs", instruction_gs),
	VCPU_STAT("instruction_io_other", instruction_io_other),
	VCPU_STAT("instruction_lpsw", instruction_lpsw),
	VCPU_STAT("instruction_lpswe", instruction_lpswe),
	VCPU_STAT("instruction_pfmf", instruction_pfmf),
	VCPU_STAT("instruction_ptff", instruction_ptff),
	VCPU_STAT("instruction_stidp", instruction_stidp),
	VCPU_STAT("instruction_sck", instruction_sck),
	VCPU_STAT("instruction_sckpf", instruction_sckpf),
	VCPU_STAT("instruction_spx", instruction_spx),
	VCPU_STAT("instruction_stpx", instruction_stpx),
	VCPU_STAT("instruction_stap", instruction_stap),
	VCPU_STAT("instruction_iske", instruction_iske),
	VCPU_STAT("instruction_ri", instruction_ri),
	VCPU_STAT("instruction_rrbe", instruction_rrbe),
	VCPU_STAT("instruction_sske", instruction_sske),
	VCPU_STAT("instruction_ipte_interlock", instruction_ipte_interlock),
	VCPU_STAT("instruction_essa", instruction_essa),
	VCPU_STAT("instruction_stsi", instruction_stsi),
	VCPU_STAT("instruction_stfl", instruction_stfl),
	VCPU_STAT("instruction_tb", instruction_tb),
	VCPU_STAT("instruction_tpi", instruction_tpi),
	VCPU_STAT("instruction_tprot", instruction_tprot),
	VCPU_STAT("instruction_tsch", instruction_tsch),
	VCPU_STAT("instruction_sthyi", instruction_sthyi),
	VCPU_STAT("instruction_sie", instruction_sie),
	VCPU_STAT("instruction_sigp_sense", instruction_sigp_sense),
	VCPU_STAT("instruction_sigp_sense_running", instruction_sigp_sense_running),
	VCPU_STAT("instruction_sigp_external_call", instruction_sigp_external_call),
	VCPU_STAT("instruction_sigp_emergency", instruction_sigp_emergency),
	VCPU_STAT("instruction_sigp_cond_emergency", instruction_sigp_cond_emergency),
	VCPU_STAT("instruction_sigp_start", instruction_sigp_start),
	VCPU_STAT("instruction_sigp_stop", instruction_sigp_stop),
	VCPU_STAT("instruction_sigp_stop_store_status", instruction_sigp_stop_store_status),
	VCPU_STAT("instruction_sigp_store_status", instruction_sigp_store_status),
	VCPU_STAT("instruction_sigp_store_adtl_status", instruction_sigp_store_adtl_status),
	VCPU_STAT("instruction_sigp_set_arch", instruction_sigp_arch),
	VCPU_STAT("instruction_sigp_set_prefix", instruction_sigp_prefix),
	VCPU_STAT("instruction_sigp_restart", instruction_sigp_restart),
	VCPU_STAT("instruction_sigp_cpu_reset", instruction_sigp_cpu_reset),
	VCPU_STAT("instruction_sigp_init_cpu_reset", instruction_sigp_init_cpu_reset),
	VCPU_STAT("instruction_sigp_unknown", instruction_sigp_unknown),
	VCPU_STAT("instruction_diag_10", diagnose_10),
	VCPU_STAT("instruction_diag_44", diagnose_44),
	VCPU_STAT("instruction_diag_9c", diagnose_9c),
	VCPU_STAT("diag_9c_ignored", diagnose_9c_ignored),
	VCPU_STAT("instruction_diag_258", diagnose_258),
	VCPU_STAT("instruction_diag_308", diagnose_308),
	VCPU_STAT("instruction_diag_500", diagnose_500),
	VCPU_STAT("instruction_diag_other", diagnose_other),
	{ NULL }
};

struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;
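
/*
 * Note (editorial assumption based on the get_tod_clock_ext() usage below):
 * this struct mirrors the 16-byte store format of STORE CLOCK EXTENDED
 * (STCKE): a leading epoch-index byte, the 64 TOD-clock bits consumed by
 * KVM, and seven trailing bytes (finer-grained clock bits and the
 * programmable field) that are not used here.
 */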

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling.  >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}
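
/*
 * The BUILD_BUG_ONs above pin SIZE_INTERNAL to the sizes exposed through
 * the uapi (S390_ARCH_FAC_MASK_SIZE_U64 / S390_ARCH_FAC_LIST_SIZE_U64) and
 * to the stfle area in the lowcore, so growing the internal mask cannot
 * silently overrun either consumer.
 */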

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}
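
/*
 * In the function above, the pair (epdx:epoch) is treated as one 128-bit
 * signed value: delta_idx sign-extends -delta into the high word, and the
 * unsigned test (epoch < delta) detects a carry out of the 64-bit addition
 * and propagates it into epdx.  Worked example: for a TOD step of
 * delta == 1, epoch += 0xffffffffffffffff carries unless the old epoch was
 * 0, so epdx nets out to (-1 + 1) == 0 exactly when no borrow is needed.
 */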

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void *opaque)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}
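
/*
 * plo_test_bit() drives the "test bit" query form of PERFORM LOCKED
 * OPERATION: the 0x100 bit in GR0 selects the test, the low bits carry the
 * function code, and condition code 0 reports that function as installed.
 * kvm_s390_cpu_feat_init() below loops over all 256 function codes to
 * build the plo subfunction bitmask.
 */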

static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc", "memory");
}
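
/*
 * __insn32_query() hand-assembles an RRF-format instruction from the raw
 * opcode, so the query function (GR0 == 0) of facilities such as SORTL and
 * DFLTCC can be executed even when the toolchain does not know the
 * mnemonics; the parameter block describing the supported subfunctions is
 * stored at *query.
 */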

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto out;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	default:
		r = 0;
	}
	return r;
}
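
/*
 * Illustrative userspace probe (a sketch, not part of the original file):
 * these capabilities are queried with KVM_CHECK_EXTENSION on the VM fd, e.g.
 *
 *	int max_size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_MEM_OP);
 *	// returns MEM_OP_MAX_SIZE (65536) here; 0 means "not supported"
 */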

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			down_write(&kvm->mm->mmap_sem);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			up_write(&kvm->mm->mmap_sem);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
		if (gtod->tod < htod.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}
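
/*
 * In kvm_s390_get_tod_clock() the 64-bit addition htod.tod + kvm->arch.epoch
 * may wrap; the test (gtod->tod < htod.tod) catches exactly that overflow
 * and carries it into the epoch index, mirroring the carry/borrow handling
 * of kvm_clock_sync_scb() above.
 */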
1182
1183static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1184{
1185 struct kvm_s390_vm_tod_clock gtod;
1186
1187 memset(&gtod, 0, sizeof(gtod));
David Hildenbrand33d1b272018-04-27 14:36:13 +02001188 kvm_s390_get_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001189 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1190 return -EFAULT;
1191
1192 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1193 gtod.epoch_idx, gtod.tod);
1194 return 0;
1195}
1196
Jason J. Herne72f25022014-11-25 09:46:02 -05001197static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1198{
1199 u8 gtod_high = 0;
1200
1201 if (copy_to_user((void __user *)attr->addr, &gtod_high,
1202 sizeof(gtod_high)))
1203 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001204 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001205
1206 return 0;
1207}
1208
1209static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1210{
David Hildenbrand5a3d8832015-09-29 16:27:24 +02001211 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -05001212
David Hildenbrand60417fc2015-09-29 16:20:36 +02001213 gtod = kvm_s390_get_tod_clock_fast(kvm);
Jason J. Herne72f25022014-11-25 09:46:02 -05001214 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1215 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001216 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001217
1218 return 0;
1219}
1220
1221static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1222{
1223 int ret;
1224
1225 if (attr->flags)
1226 return -EINVAL;
1227
1228 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001229 case KVM_S390_VM_TOD_EXT:
1230 ret = kvm_s390_get_tod_ext(kvm, attr);
1231 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001232 case KVM_S390_VM_TOD_HIGH:
1233 ret = kvm_s390_get_tod_high(kvm, attr);
1234 break;
1235 case KVM_S390_VM_TOD_LOW:
1236 ret = kvm_s390_get_tod_low(kvm, attr);
1237 break;
1238 default:
1239 ret = -ENXIO;
1240 break;
1241 }
1242 return ret;
1243}
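
/*
 * Illustrative userspace sketch (not part of the original source): the TOD
 * getters/setters above are reached via the generic device-attribute ioctls
 * on the VM file descriptor. A minimal caller, assuming <linux/kvm.h> and an
 * open vm_fd, could look like:
 *
 *	struct kvm_s390_vm_tod_clock gtod = { .epoch_idx = 0, .tod = tod };
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_EXT,
 *		.addr  = (__u64)&gtod,
 *	};
 *	if (ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr))
 *		perror("KVM_SET_DEVICE_ATTR");
 *
 * Reading the clock back works the same way with KVM_GET_DEVICE_ATTR.
 */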
1244
Michael Mueller658b6ed2015-02-02 15:49:35 +01001245static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1246{
1247 struct kvm_s390_vm_cpu_processor *proc;
David Hildenbrand053dd232016-04-04 13:59:42 +02001248 u16 lowest_ibc, unblocked_ibc;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001249 int ret = 0;
1250
1251 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +02001252 if (kvm->created_vcpus) {
Michael Mueller658b6ed2015-02-02 15:49:35 +01001253 ret = -EBUSY;
1254 goto out;
1255 }
1256 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1257 if (!proc) {
1258 ret = -ENOMEM;
1259 goto out;
1260 }
1261 if (!copy_from_user(proc, (void __user *)attr->addr,
1262 sizeof(*proc))) {
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001263 kvm->arch.model.cpuid = proc->cpuid;
David Hildenbrand053dd232016-04-04 13:59:42 +02001264 lowest_ibc = sclp.ibc >> 16 & 0xfff;
1265 unblocked_ibc = sclp.ibc & 0xfff;
David Hildenbrand0487c442016-06-10 09:22:31 +02001266 if (lowest_ibc && proc->ibc) {
David Hildenbrand053dd232016-04-04 13:59:42 +02001267 if (proc->ibc > unblocked_ibc)
1268 kvm->arch.model.ibc = unblocked_ibc;
1269 else if (proc->ibc < lowest_ibc)
1270 kvm->arch.model.ibc = lowest_ibc;
1271 else
1272 kvm->arch.model.ibc = proc->ibc;
1273 }
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001274 memcpy(kvm->arch.model.fac_list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +01001275 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001276 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1277 kvm->arch.model.ibc,
1278 kvm->arch.model.cpuid);
1279 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1280 kvm->arch.model.fac_list[0],
1281 kvm->arch.model.fac_list[1],
1282 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001283 } else
1284 ret = -EFAULT;
1285 kfree(proc);
1286out:
1287 mutex_unlock(&kvm->lock);
1288 return ret;
1289}
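
/*
 * Worked example for the IBC clamping above (explanatory, not from the
 * original source): with sclp.ibc = 0x01230456 the machine reports
 * lowest_ibc = 0x123 and unblocked_ibc = 0x456. A request of
 * proc->ibc = 0x500 is clamped down to 0x456, a request of 0x100 is
 * raised to 0x123, and any value in between is taken as-is.
 */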
1290
David Hildenbrand15c97052015-03-19 17:36:43 +01001291static int kvm_s390_set_processor_feat(struct kvm *kvm,
1292 struct kvm_device_attr *attr)
1293{
1294 struct kvm_s390_vm_cpu_feat data;
David Hildenbrand15c97052015-03-19 17:36:43 +01001295
1296 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1297 return -EFAULT;
1298 if (!bitmap_subset((unsigned long *) data.feat,
1299 kvm_s390_available_cpu_feat,
1300 KVM_S390_VM_CPU_FEAT_NR_BITS))
1301 return -EINVAL;
1302
1303 mutex_lock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001304 if (kvm->created_vcpus) {
1305 mutex_unlock(&kvm->lock);
1306 return -EBUSY;
David Hildenbrand15c97052015-03-19 17:36:43 +01001307 }
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001308 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1309 KVM_S390_VM_CPU_FEAT_NR_BITS);
David Hildenbrand15c97052015-03-19 17:36:43 +01001310 mutex_unlock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001311 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1312 data.feat[0],
1313 data.feat[1],
1314 data.feat[2]);
1315 return 0;
David Hildenbrand15c97052015-03-19 17:36:43 +01001316}
1317
David Hildenbrand0a763c72016-05-18 16:03:47 +02001318static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1319 struct kvm_device_attr *attr)
1320{
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001321 mutex_lock(&kvm->lock);
1322 if (kvm->created_vcpus) {
1323 mutex_unlock(&kvm->lock);
1324 return -EBUSY;
1325 }
1326
1327 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1328 sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1329 mutex_unlock(&kvm->lock);
1330 return -EFAULT;
1331 }
1332 mutex_unlock(&kvm->lock);
1333
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001334 VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1335 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1336 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1337 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1338 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1339 VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1340 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1341 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1342 VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1343 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1344 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1345 VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1346 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1347 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1348 VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
1349 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1350 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1351 VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1352 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1353 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1354 VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1355 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1356 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1357 VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1358 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1359 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1360 VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1361 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1362 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1363 VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1364 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1365 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1366 VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1367 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1368 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1369 VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1370 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1371 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1372 VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1373 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1374 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1375 VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1376 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1377 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001378 VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1379 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1380 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001381 VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1382 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1383 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1384 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1385 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001386 VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1387 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1388 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1389 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1390 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001391
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001392 return 0;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001393}
1394
Michael Mueller658b6ed2015-02-02 15:49:35 +01001395static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1396{
1397 int ret = -ENXIO;
1398
1399 switch (attr->attr) {
1400 case KVM_S390_VM_CPU_PROCESSOR:
1401 ret = kvm_s390_set_processor(kvm, attr);
1402 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001403 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1404 ret = kvm_s390_set_processor_feat(kvm, attr);
1405 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001406 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1407 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1408 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001409 }
1410 return ret;
1411}
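
/*
 * Illustrative userspace sketch (not part of the original source): all
 * setters above share the KVM_S390_VM_CPU_MODEL attribute group and must be
 * used before the first VCPU is created (-EBUSY otherwise). Assuming an
 * open vm_fd:
 *
 *	struct kvm_s390_vm_cpu_processor proc;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_CPU_MODEL,
 *		.attr  = KVM_S390_VM_CPU_PROCESSOR,
 *		.addr  = (__u64)&proc,
 *	};
 *
 *	... fill proc.cpuid, proc.ibc and proc.fac_list, then ...
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */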
1412
1413static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1414{
1415 struct kvm_s390_vm_cpu_processor *proc;
1416 int ret = 0;
1417
1418 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1419 if (!proc) {
1420 ret = -ENOMEM;
1421 goto out;
1422 }
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001423 proc->cpuid = kvm->arch.model.cpuid;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001424 proc->ibc = kvm->arch.model.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001425 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1426 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001427 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1428 kvm->arch.model.ibc,
1429 kvm->arch.model.cpuid);
1430 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1431 kvm->arch.model.fac_list[0],
1432 kvm->arch.model.fac_list[1],
1433 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001434 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1435 ret = -EFAULT;
1436 kfree(proc);
1437out:
1438 return ret;
1439}
1440
1441static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1442{
1443 struct kvm_s390_vm_cpu_machine *mach;
1444 int ret = 0;
1445
1446 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
1447 if (!mach) {
1448 ret = -ENOMEM;
1449 goto out;
1450 }
1451 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001452 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001453 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001454 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001455 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001456 sizeof(S390_lowcore.stfle_fac_list));
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001457 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1458 kvm->arch.model.ibc,
1459 kvm->arch.model.cpuid);
1460 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1461 mach->fac_mask[0],
1462 mach->fac_mask[1],
1463 mach->fac_mask[2]);
1464 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1465 mach->fac_list[0],
1466 mach->fac_list[1],
1467 mach->fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001468 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1469 ret = -EFAULT;
1470 kfree(mach);
1471out:
1472 return ret;
1473}
1474
David Hildenbrand15c97052015-03-19 17:36:43 +01001475static int kvm_s390_get_processor_feat(struct kvm *kvm,
1476 struct kvm_device_attr *attr)
1477{
1478 struct kvm_s390_vm_cpu_feat data;
1479
1480 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1481 KVM_S390_VM_CPU_FEAT_NR_BITS);
1482 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1483 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001484 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1485 data.feat[0],
1486 data.feat[1],
1487 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001488 return 0;
1489}
1490
1491static int kvm_s390_get_machine_feat(struct kvm *kvm,
1492 struct kvm_device_attr *attr)
1493{
1494 struct kvm_s390_vm_cpu_feat data;
1495
1496 bitmap_copy((unsigned long *) data.feat,
1497 kvm_s390_available_cpu_feat,
1498 KVM_S390_VM_CPU_FEAT_NR_BITS);
1499 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1500 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001501 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1502 data.feat[0],
1503 data.feat[1],
1504 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001505 return 0;
1506}
1507
David Hildenbrand0a763c72016-05-18 16:03:47 +02001508static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1509 struct kvm_device_attr *attr)
1510{
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001511 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1512 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1513 return -EFAULT;
1514
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001515 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1516 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1517 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1518 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1519 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1520 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1521 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1522 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1523 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1524 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1525 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1526 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1527 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1528 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1529 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
1530 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1531 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1532 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1533 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1534 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1535 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1536 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1537 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1538 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1539 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1540 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1541 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1542 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1543 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1544 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1545 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1546 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1547 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1548 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1549 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1550 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1551 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1552 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1553 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1554 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1555 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1556 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1557 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1558 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001559 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1560 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1561 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001562 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1563 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1564 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1565 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1566 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001567 VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1568 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1569 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1570 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1571 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001572
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001573 return 0;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001574}
1575
1576static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1577 struct kvm_device_attr *attr)
1578{
1579 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1580 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1581 return -EFAULT;
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001582
1583 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1584 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1585 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1586 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1587 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1588 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
1589 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1590 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1591 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
1592 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1593 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1594 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
1595 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1596 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1597 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
1598 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1599 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1600 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
1601 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1602 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1603 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
1604 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1605 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1606 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
1607 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1608 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1609 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
1610 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1611 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1612 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
1613 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1614 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1615 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
1616 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1617 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1618 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
1619 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1620 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1621 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
1622 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1623 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1624 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
1625 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1626 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001627 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
1628 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1629 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001630 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1631 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1632 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1633 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1634 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001635 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1636 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1637 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1638 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1639 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001640
David Hildenbrand0a763c72016-05-18 16:03:47 +02001641 return 0;
1642}
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001643
Michael Mueller658b6ed2015-02-02 15:49:35 +01001644static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1645{
1646 int ret = -ENXIO;
1647
1648 switch (attr->attr) {
1649 case KVM_S390_VM_CPU_PROCESSOR:
1650 ret = kvm_s390_get_processor(kvm, attr);
1651 break;
1652 case KVM_S390_VM_CPU_MACHINE:
1653 ret = kvm_s390_get_machine(kvm, attr);
1654 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001655 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1656 ret = kvm_s390_get_processor_feat(kvm, attr);
1657 break;
1658 case KVM_S390_VM_CPU_MACHINE_FEAT:
1659 ret = kvm_s390_get_machine_feat(kvm, attr);
1660 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001661 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1662 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1663 break;
1664 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1665 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1666 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001667 }
1668 return ret;
1669}
1670
Dominik Dingelf2061652014-04-09 13:13:00 +02001671static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1672{
1673 int ret;
1674
1675 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001676 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001677 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001678 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001679 case KVM_S390_VM_TOD:
1680 ret = kvm_s390_set_tod(kvm, attr);
1681 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001682 case KVM_S390_VM_CPU_MODEL:
1683 ret = kvm_s390_set_cpu_model(kvm, attr);
1684 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001685 case KVM_S390_VM_CRYPTO:
1686 ret = kvm_s390_vm_set_crypto(kvm, attr);
1687 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001688 case KVM_S390_VM_MIGRATION:
1689 ret = kvm_s390_vm_set_migration(kvm, attr);
1690 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001691 default:
1692 ret = -ENXIO;
1693 break;
1694 }
1695
1696 return ret;
1697}
1698
1699static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1700{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001701 int ret;
1702
1703 switch (attr->group) {
1704 case KVM_S390_VM_MEM_CTRL:
1705 ret = kvm_s390_get_mem_control(kvm, attr);
1706 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001707 case KVM_S390_VM_TOD:
1708 ret = kvm_s390_get_tod(kvm, attr);
1709 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001710 case KVM_S390_VM_CPU_MODEL:
1711 ret = kvm_s390_get_cpu_model(kvm, attr);
1712 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001713 case KVM_S390_VM_MIGRATION:
1714 ret = kvm_s390_vm_get_migration(kvm, attr);
1715 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001716 default:
1717 ret = -ENXIO;
1718 break;
1719 }
1720
1721 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +02001722}
1723
1724static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1725{
1726 int ret;
1727
1728 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001729 case KVM_S390_VM_MEM_CTRL:
1730 switch (attr->attr) {
1731 case KVM_S390_VM_MEM_ENABLE_CMMA:
1732 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +01001733 ret = sclp.has_cmma ? 0 : -ENXIO;
1734 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001735 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001736 ret = 0;
1737 break;
1738 default:
1739 ret = -ENXIO;
1740 break;
1741 }
1742 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001743 case KVM_S390_VM_TOD:
1744 switch (attr->attr) {
1745 case KVM_S390_VM_TOD_LOW:
1746 case KVM_S390_VM_TOD_HIGH:
1747 ret = 0;
1748 break;
1749 default:
1750 ret = -ENXIO;
1751 break;
1752 }
1753 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001754 case KVM_S390_VM_CPU_MODEL:
1755 switch (attr->attr) {
1756 case KVM_S390_VM_CPU_PROCESSOR:
1757 case KVM_S390_VM_CPU_MACHINE:
David Hildenbrand15c97052015-03-19 17:36:43 +01001758 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1759 case KVM_S390_VM_CPU_MACHINE_FEAT:
David Hildenbrand0a763c72016-05-18 16:03:47 +02001760 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001761 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001762 ret = 0;
1763 break;
1764 default:
1765 ret = -ENXIO;
1766 break;
1767 }
1768 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001769 case KVM_S390_VM_CRYPTO:
1770 switch (attr->attr) {
1771 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1772 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1773 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1774 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1775 ret = 0;
1776 break;
Tony Krowiak37940fb2018-09-25 19:16:39 -04001777 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1778 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1779 ret = ap_instructions_available() ? 0 : -ENXIO;
1780 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001781 default:
1782 ret = -ENXIO;
1783 break;
1784 }
1785 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001786 case KVM_S390_VM_MIGRATION:
1787 ret = 0;
1788 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001789 default:
1790 ret = -ENXIO;
1791 break;
1792 }
1793
1794 return ret;
1795}
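
/*
 * Illustrative probe (not part of the original source): userspace can test
 * whether an attribute exists before using it, e.g.:
 *
 *	struct kvm_device_attr attr = { .group = KVM_S390_VM_MIGRATION };
 *	bool has_migration = ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
 */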
1796
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001797static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1798{
1799 uint8_t *keys;
1800 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001801 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001802
1803 if (args->flags != 0)
1804 return -EINVAL;
1805
1806 /* Is this guest using storage keys? */
Janosch Frank55531b72018-02-15 16:33:47 +01001807 if (!mm_uses_skeys(current->mm))
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001808 return KVM_S390_GET_SKEYS_NONE;
1809
1810 /* Enforce sane limit on memory allocation */
1811 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1812 return -EINVAL;
1813
Michal Hocko752ade62017-05-08 15:57:27 -07001814 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001815 if (!keys)
1816 return -ENOMEM;
1817
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001818 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001819 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001820 for (i = 0; i < args->count; i++) {
1821 hva = gfn_to_hva(kvm, args->start_gfn + i);
1822 if (kvm_is_error_hva(hva)) {
1823 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001824 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001825 }
1826
David Hildenbrand154c8c12016-05-09 11:22:34 +02001827 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1828 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001829 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001830 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001831 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001832 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001833
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001834 if (!r) {
1835 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1836 sizeof(uint8_t) * args->count);
1837 if (r)
1838 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001839 }
1840
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001841 kvfree(keys);
1842 return r;
1843}
1844
1845static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1846{
1847 uint8_t *keys;
1848 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001849 int srcu_idx, i, r = 0;
Janosch Frankbd096f62018-07-18 13:40:22 +01001850 bool unlocked;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001851
1852 if (args->flags != 0)
1853 return -EINVAL;
1854
1855 /* Enforce sane limit on memory allocation */
1856 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1857 return -EINVAL;
1858
Michal Hocko752ade62017-05-08 15:57:27 -07001859 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001860 if (!keys)
1861 return -ENOMEM;
1862
1863 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1864 sizeof(uint8_t) * args->count);
1865 if (r) {
1866 r = -EFAULT;
1867 goto out;
1868 }
1869
1870 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001871 r = s390_enable_skey();
1872 if (r)
1873 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001874
Janosch Frankbd096f62018-07-18 13:40:22 +01001875 i = 0;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001876 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001877 srcu_idx = srcu_read_lock(&kvm->srcu);
Janosch Frankbd096f62018-07-18 13:40:22 +01001878 while (i < args->count) {
1879 unlocked = false;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001880 hva = gfn_to_hva(kvm, args->start_gfn + i);
1881 if (kvm_is_error_hva(hva)) {
1882 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001883 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001884 }
1885
1886 /* Lowest order bit is reserved */
1887 if (keys[i] & 0x01) {
1888 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001889 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001890 }
1891
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001892 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Janosch Frankbd096f62018-07-18 13:40:22 +01001893 if (r) {
1894 r = fixup_user_fault(current, current->mm, hva,
1895 FAULT_FLAG_WRITE, &unlocked);
1896 if (r)
1897 break;
1898 }
1899 if (!r)
1900 i++;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001901 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001902 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001903 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001904out:
1905 kvfree(keys);
1906 return r;
1907}
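
/*
 * Illustrative userspace sketch (not part of the original source), assuming
 * an open vm_fd and a guest that actually uses storage keys:
 *
 *	uint8_t keys[256];
 *	struct kvm_s390_skeys args = {
 *		.start_gfn     = 0,
 *		.count         = 256,
 *		.skeydata_addr = (__u64)keys,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *
 * The ioctl returns KVM_S390_GET_SKEYS_NONE when the guest does not use
 * storage keys; KVM_S390_SET_SKEYS takes the same structure to write keys
 * back, e.g. on the migration target.
 */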
1908
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001909/*
1910 * Base address and length must be sent at the start of each block; it is
1911 * therefore cheaper to send some clean data, as long as it is less than the
1912 * size of two longs.
1913 */
1914#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1915/* for consistency */
1916#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1917
1918/*
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001919 * Similar to gfn_to_memslot, but also returns the index of a memslot when the
1920 * address falls in a hole. In that case, the index of one of the memslots
1921 * bordering the hole is returned.
1922 */
1923static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
1924{
1925 int start = 0, end = slots->used_slots;
1926 int slot = atomic_read(&slots->lru_slot);
1927 struct kvm_memory_slot *memslots = slots->memslots;
1928
1929 if (gfn >= memslots[slot].base_gfn &&
1930 gfn < memslots[slot].base_gfn + memslots[slot].npages)
1931 return slot;
1932
1933 while (start < end) {
1934 slot = start + (end - start) / 2;
1935
1936 if (gfn >= memslots[slot].base_gfn)
1937 end = slot;
1938 else
1939 start = slot + 1;
1940 }
1941
Sean Christopherson97daa022020-04-07 23:40:59 -07001942 if (start >= slots->used_slots)
1943 return slots->used_slots - 1;
1944
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001945 if (gfn >= memslots[start].base_gfn &&
1946 gfn < memslots[start].base_gfn + memslots[start].npages) {
1947 atomic_set(&slots->lru_slot, start);
1948 }
1949
1950 return start;
1951}
1952
1953static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1954 u8 *res, unsigned long bufsize)
1955{
1956 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
1957
1958 args->count = 0;
1959 while (args->count < bufsize) {
1960 hva = gfn_to_hva(kvm, cur_gfn);
1961 /*
1962 * We return an error if the first value was invalid, but we
1963 * return successfully if at least one value was copied.
1964 */
1965 if (kvm_is_error_hva(hva))
1966 return args->count ? 0 : -EFAULT;
1967 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1968 pgstev = 0;
1969 res[args->count++] = (pgstev >> 24) & 0x43;
1970 cur_gfn++;
1971 }
1972
1973 return 0;
1974}
1975
1976static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
1977 unsigned long cur_gfn)
1978{
1979 int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
1980 struct kvm_memory_slot *ms = slots->memslots + slotidx;
1981 unsigned long ofs = cur_gfn - ms->base_gfn;
1982
1983 if (ms->base_gfn + ms->npages <= cur_gfn) {
1984 slotidx--;
1985 /* If we are above the highest slot, wrap around */
1986 if (slotidx < 0)
1987 slotidx = slots->used_slots - 1;
1988
1989 ms = slots->memslots + slotidx;
1990 ofs = 0;
1991 }
1992 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
1993 while ((slotidx > 0) && (ofs >= ms->npages)) {
1994 slotidx--;
1995 ms = slots->memslots + slotidx;
1996 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
1997 }
1998 return ms->base_gfn + ofs;
1999}
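
/*
 * Explanatory note (not part of the original source): memslots are kept
 * sorted by base_gfn in descending order, so decrementing slotidx above
 * moves the scan to the next-higher guest address range, wrapping around to
 * the lowest range when slotidx would become negative. If no set bit is
 * found at all, the returned gfn is base_gfn + npages of the last slot
 * scanned, which the caller recognizes as lying beyond the end of memory.
 */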
2000
2001static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
2002 u8 *res, unsigned long bufsize)
2003{
2004 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2005 struct kvm_memslots *slots = kvm_memslots(kvm);
2006 struct kvm_memory_slot *ms;
2007
Sean Christopherson0774a962020-03-20 13:55:40 -07002008 if (unlikely(!slots->used_slots))
2009 return 0;
2010
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002011 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2012 ms = gfn_to_memslot(kvm, cur_gfn);
2013 args->count = 0;
2014 args->start_gfn = cur_gfn;
2015 if (!ms)
2016 return 0;
2017 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2018 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
2019
2020 while (args->count < bufsize) {
2021 hva = gfn_to_hva(kvm, cur_gfn);
2022 if (kvm_is_error_hva(hva))
2023 return 0;
2024 /* Decrement only if we actually flipped the bit to 0 */
2025 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2026 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2027 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2028 pgstev = 0;
2029 /* Save the value */
2030 res[args->count++] = (pgstev >> 24) & 0x43;
2031 /* If the next bit is too far away, stop. */
2032 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2033 return 0;
2034 /* If we reached the previous "next", find the next one */
2035 if (cur_gfn == next_gfn)
2036 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2037 /* Reached the end of memory or of the buffer, stop */
2038 if ((next_gfn >= mem_end) ||
2039 (next_gfn - args->start_gfn >= bufsize))
2040 return 0;
2041 cur_gfn++;
2042 /* Reached the end of the current memslot, take the next one. */
2043 if (cur_gfn - ms->base_gfn >= ms->npages) {
2044 ms = gfn_to_memslot(kvm, cur_gfn);
2045 if (!ms)
2046 return 0;
2047 }
2048 }
2049 return 0;
2050}
2051
2052/*
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002053 * This function searches for the next page with dirty CMMA attributes, and
2054 * saves the attributes in the buffer until either the end of the buffer is
2055 * reached or a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is
2056 * found; no trailing clean bytes are saved.
2057 * In case no dirty bits were found, or if CMMA was not enabled or used, the
2058 * output buffer will indicate 0 as length.
2059 */
2060static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2061 struct kvm_s390_cmma_log *args)
2062{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002063 unsigned long bufsize;
2064 int srcu_idx, peek, ret;
2065 u8 *values;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002066
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002067 if (!kvm->arch.use_cmma)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002068 return -ENXIO;
2069 /* Invalid/unsupported flags were specified */
2070 if (args->flags & ~KVM_S390_CMMA_PEEK)
2071 return -EINVAL;
2072 /* Migration mode query, and we are not doing a migration */
2073 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002074 if (!peek && !kvm->arch.migration_mode)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002075 return -EINVAL;
2076 /* CMMA is disabled or was not used, or the buffer has length zero */
2077 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002078 if (!bufsize || !kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002079 memset(args, 0, sizeof(*args));
2080 return 0;
2081 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002082 /* We are not peeking, and there are no dirty pages */
2083 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2084 memset(args, 0, sizeof(*args));
2085 return 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002086 }
2087
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002088 values = vmalloc(bufsize);
2089 if (!values)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002090 return -ENOMEM;
2091
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002092 down_read(&kvm->mm->mmap_sem);
2093 srcu_idx = srcu_read_lock(&kvm->srcu);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002094 if (peek)
2095 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2096 else
2097 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002098 srcu_read_unlock(&kvm->srcu, srcu_idx);
2099 up_read(&kvm->mm->mmap_sem);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002100
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002101 if (kvm->arch.migration_mode)
2102 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2103 else
2104 args->remaining = 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002105
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002106 if (copy_to_user((void __user *)args->values, values, args->count))
2107 ret = -EFAULT;
2108
2109 vfree(values);
2110 return ret;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002111}
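
/*
 * Illustrative userspace sketch (not part of the original source): during
 * migration, dirty CMMA values are pulled in chunks. Assuming an open vm_fd
 * and migration mode enabled via the KVM_S390_VM_MIGRATION attribute:
 *
 *	uint8_t buf[4096];
 *	struct kvm_s390_cmma_log log = {
 *		.start_gfn = 0,
 *		.count     = sizeof(buf),
 *		.flags     = 0,
 *		.values    = (__u64)buf,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log);
 *
 * On return, log.count holds the number of values stored, log.start_gfn the
 * gfn of the first value, and log.remaining the number of still-dirty pages.
 * Setting KVM_S390_CMMA_PEEK in flags reads attributes without requiring
 * migration mode and without clearing dirty bits.
 */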
2112
2113/*
2114 * This function sets the CMMA attributes for the given pages. If the input
2115 * buffer has zero length, no action is taken, otherwise the attributes are
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002116 * set and the mm->context.uses_cmm flag is set.
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002117 */
2118static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2119 const struct kvm_s390_cmma_log *args)
2120{
2121 unsigned long hva, mask, pgstev, i;
2122 uint8_t *bits;
2123 int srcu_idx, r = 0;
2124
2125 mask = args->mask;
2126
2127 if (!kvm->arch.use_cmma)
2128 return -ENXIO;
2129 /* invalid/unsupported flags */
2130 if (args->flags != 0)
2131 return -EINVAL;
2132 /* Enforce sane limit on memory allocation */
2133 if (args->count > KVM_S390_CMMA_SIZE_MAX)
2134 return -EINVAL;
2135 /* Nothing to do */
2136 if (args->count == 0)
2137 return 0;
2138
Kees Cook42bc47b2018-06-12 14:27:11 -07002139 bits = vmalloc(array_size(sizeof(*bits), args->count));
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002140 if (!bits)
2141 return -ENOMEM;
2142
2143 r = copy_from_user(bits, (void __user *)args->values, args->count);
2144 if (r) {
2145 r = -EFAULT;
2146 goto out;
2147 }
2148
2149 down_read(&kvm->mm->mmap_sem);
2150 srcu_idx = srcu_read_lock(&kvm->srcu);
2151 for (i = 0; i < args->count; i++) {
2152 hva = gfn_to_hva(kvm, args->start_gfn + i);
2153 if (kvm_is_error_hva(hva)) {
2154 r = -EFAULT;
2155 break;
2156 }
2157
2158 pgstev = bits[i];
2159 pgstev = pgstev << 24;
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002160 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002161 set_pgste_bits(kvm->mm, hva, mask, pgstev);
2162 }
2163 srcu_read_unlock(&kvm->srcu, srcu_idx);
2164 up_read(&kvm->mm->mmap_sem);
2165
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002166 if (!kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002167 down_write(&kvm->mm->mmap_sem);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002168 kvm->mm->context.uses_cmm = 1;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002169 up_write(&kvm->mm->mmap_sem);
2170 }
2171out:
2172 vfree(bits);
2173 return r;
2174}
2175
Janosch Frank29b40f12019-09-30 04:19:18 -04002176static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
2177{
2178 struct kvm_vcpu *vcpu;
2179 u16 rc, rrc;
2180 int ret = 0;
2181 int i;
2182
2183 /*
2184 * We ignore failures and try to destroy as many CPUs as possible.
2185 * At the same time we must not free the assigned resources when
2186 * this fails, as the ultravisor still has access to that memory.
2187 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
2188 * behind.
2189 * We want to return the first failure rc and rrc, though.
2190 */
2191 kvm_for_each_vcpu(i, vcpu, kvm) {
2192 mutex_lock(&vcpu->mutex);
2193 if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
2194 *rcp = rc;
2195 *rrcp = rrc;
2196 ret = -EIO;
2197 }
2198 mutex_unlock(&vcpu->mutex);
2199 }
2200 return ret;
2201}
2202
2203static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2204{
2205 int i, r = 0;
2206 u16 dummy;
2207
2208 struct kvm_vcpu *vcpu;
2209
2210 kvm_for_each_vcpu(i, vcpu, kvm) {
2211 mutex_lock(&vcpu->mutex);
2212 r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2213 mutex_unlock(&vcpu->mutex);
2214 if (r)
2215 break;
2216 }
2217 if (r)
2218 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2219 return r;
2220}
2221
2222static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2223{
2224 int r = 0;
2225 u16 dummy;
2226 void __user *argp = (void __user *)cmd->data;
2227
2228 switch (cmd->cmd) {
2229 case KVM_PV_ENABLE: {
2230 r = -EINVAL;
2231 if (kvm_s390_pv_is_protected(kvm))
2232 break;
2233
2234 /*
2235 * Format 4 SIE needs the ESCA. As we never switch back to the BSCA
2236 * from the ESCA, no cleanup is needed in the error cases below.
2237 */
2238 r = sca_switch_to_extended(kvm);
2239 if (r)
2240 break;
2241
Janosch Frankfa0c5ea2019-07-16 13:08:37 +02002242 down_write(&current->mm->mmap_sem);
2243 r = gmap_mark_unmergeable();
2244 up_write(&current->mm->mmap_sem);
2245 if (r)
2246 break;
2247
Janosch Frank29b40f12019-09-30 04:19:18 -04002248 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2249 if (r)
2250 break;
2251
2252 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2253 if (r)
2254 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
Christian Borntraeger0890dde2020-02-03 09:13:37 +01002255
2256 /* we need to block service interrupts from now on */
2257 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
Janosch Frank29b40f12019-09-30 04:19:18 -04002258 break;
2259 }
2260 case KVM_PV_DISABLE: {
2261 r = -EINVAL;
2262 if (!kvm_s390_pv_is_protected(kvm))
2263 break;
2264
2265 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2266 /*
2267 * If a CPU could not be destroyed, destroying the VM will also fail.
2268 * There is no point in trying to destroy it. Instead return
2269 * the rc and rrc of the first CPU that failed to be destroyed.
2270 */
2271 if (r)
2272 break;
2273 r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
Christian Borntraeger0890dde2020-02-03 09:13:37 +01002274
2275 /* no need to block service interrupts any more */
2276 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
Janosch Frank29b40f12019-09-30 04:19:18 -04002277 break;
2278 }
2279 case KVM_PV_SET_SEC_PARMS: {
2280 struct kvm_s390_pv_sec_parm parms = {};
2281 void *hdr;
2282
2283 r = -EINVAL;
2284 if (!kvm_s390_pv_is_protected(kvm))
2285 break;
2286
2287 r = -EFAULT;
2288 if (copy_from_user(&parms, argp, sizeof(parms)))
2289 break;
2290
2291 /* Currently restricted to 8KB */
2292 r = -EINVAL;
2293 if (parms.length > PAGE_SIZE * 2)
2294 break;
2295
2296 r = -ENOMEM;
2297 hdr = vmalloc(parms.length);
2298 if (!hdr)
2299 break;
2300
2301 r = -EFAULT;
2302 if (!copy_from_user(hdr, (void __user *)parms.origin,
2303 parms.length))
2304 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2305 &cmd->rc, &cmd->rrc);
2306
2307 vfree(hdr);
2308 break;
2309 }
2310 case KVM_PV_UNPACK: {
2311 struct kvm_s390_pv_unp unp = {};
2312
2313 r = -EINVAL;
2314 if (!kvm_s390_pv_is_protected(kvm))
2315 break;
2316
2317 r = -EFAULT;
2318 if (copy_from_user(&unp, argp, sizeof(unp)))
2319 break;
2320
2321 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2322 &cmd->rc, &cmd->rrc);
2323 break;
2324 }
2325 case KVM_PV_VERIFY: {
2326 r = -EINVAL;
2327 if (!kvm_s390_pv_is_protected(kvm))
2328 break;
2329
2330 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2331 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2332 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2333 cmd->rrc);
2334 break;
2335 }
Janosch Franke0d27732019-05-09 13:07:21 +02002336 case KVM_PV_PREP_RESET: {
2337 r = -EINVAL;
2338 if (!kvm_s390_pv_is_protected(kvm))
2339 break;
2340
2341 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2342 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2343 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2344 cmd->rc, cmd->rrc);
2345 break;
2346 }
2347 case KVM_PV_UNSHARE_ALL: {
2348 r = -EINVAL;
2349 if (!kvm_s390_pv_is_protected(kvm))
2350 break;
2351
2352 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2353 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2354 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2355 cmd->rc, cmd->rrc);
2356 break;
2357 }
Janosch Frank29b40f12019-09-30 04:19:18 -04002358 default:
2359 r = -ENOTTY;
2360 }
2361 return r;
2362}
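
/*
 * Illustrative lifecycle sketch (not part of the original source): to boot a
 * protected guest, userspace typically issues the sub-commands above in this
 * order, each through KVM_S390_PV_COMMAND on the VM file descriptor:
 *
 *	KVM_PV_ENABLE		create the secure configuration and CPUs
 *	KVM_PV_SET_SEC_PARMS	hand the SE header over to the ultravisor
 *	KVM_PV_UNPACK		decrypt and unpack each image component
 *	KVM_PV_VERIFY		have the ultravisor verify the image
 *
 * Each command reports ultravisor diagnostics in cmd->rc/cmd->rrc even when
 * the ioctl itself returns 0.
 */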
2363
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002364long kvm_arch_vm_ioctl(struct file *filp,
2365 unsigned int ioctl, unsigned long arg)
2366{
2367 struct kvm *kvm = filp->private_data;
2368 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +02002369 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002370 int r;
2371
2372 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01002373 case KVM_S390_INTERRUPT: {
2374 struct kvm_s390_interrupt s390int;
2375
2376 r = -EFAULT;
2377 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2378 break;
2379 r = kvm_s390_inject_vm(kvm, &s390int);
2380 break;
2381 }
Cornelia Huck84223592013-07-15 13:36:01 +02002382 case KVM_CREATE_IRQCHIP: {
2383 struct kvm_irq_routing_entry routing;
2384
2385 r = -EINVAL;
2386 if (kvm->arch.use_irqchip) {
2387 /* Set up dummy routing. */
2388 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -04002389 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +02002390 }
2391 break;
2392 }
Dominik Dingelf2061652014-04-09 13:13:00 +02002393 case KVM_SET_DEVICE_ATTR: {
2394 r = -EFAULT;
2395 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2396 break;
2397 r = kvm_s390_vm_set_attr(kvm, &attr);
2398 break;
2399 }
2400 case KVM_GET_DEVICE_ATTR: {
2401 r = -EFAULT;
2402 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2403 break;
2404 r = kvm_s390_vm_get_attr(kvm, &attr);
2405 break;
2406 }
2407 case KVM_HAS_DEVICE_ATTR: {
2408 r = -EFAULT;
2409 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2410 break;
2411 r = kvm_s390_vm_has_attr(kvm, &attr);
2412 break;
2413 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -04002414 case KVM_S390_GET_SKEYS: {
2415 struct kvm_s390_skeys args;
2416
2417 r = -EFAULT;
2418 if (copy_from_user(&args, argp,
2419 sizeof(struct kvm_s390_skeys)))
2420 break;
2421 r = kvm_s390_get_skeys(kvm, &args);
2422 break;
2423 }
2424 case KVM_S390_SET_SKEYS: {
2425 struct kvm_s390_skeys args;
2426
2427 r = -EFAULT;
2428 if (copy_from_user(&args, argp,
2429 sizeof(struct kvm_s390_skeys)))
2430 break;
2431 r = kvm_s390_set_skeys(kvm, &args);
2432 break;
2433 }
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002434 case KVM_S390_GET_CMMA_BITS: {
2435 struct kvm_s390_cmma_log args;
2436
2437 r = -EFAULT;
2438 if (copy_from_user(&args, argp, sizeof(args)))
2439 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002440 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002441 r = kvm_s390_get_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002442 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002443 if (!r) {
2444 r = copy_to_user(argp, &args, sizeof(args));
2445 if (r)
2446 r = -EFAULT;
2447 }
2448 break;
2449 }
2450 case KVM_S390_SET_CMMA_BITS: {
2451 struct kvm_s390_cmma_log args;
2452
2453 r = -EFAULT;
2454 if (copy_from_user(&args, argp, sizeof(args)))
2455 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002456 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002457 r = kvm_s390_set_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002458 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002459 break;
2460 }
Janosch Frank29b40f12019-09-30 04:19:18 -04002461 case KVM_S390_PV_COMMAND: {
2462 struct kvm_pv_cmd args;
2463
Janosch Frankfe28c7862019-05-15 13:24:30 +02002464 /* protected virtualization implies user-controlled SIGP */
2465 kvm->arch.user_cpu_state_ctrl = 1;
Janosch Frank29b40f12019-09-30 04:19:18 -04002466 r = 0;
2467 if (!is_prot_virt_host()) {
2468 r = -EINVAL;
2469 break;
2470 }
2471 if (copy_from_user(&args, argp, sizeof(args))) {
2472 r = -EFAULT;
2473 break;
2474 }
2475 if (args.flags) {
2476 r = -EINVAL;
2477 break;
2478 }
2479 mutex_lock(&kvm->lock);
2480 r = kvm_s390_handle_pv(kvm, &args);
2481 mutex_unlock(&kvm->lock);
2482 if (copy_to_user(argp, &args, sizeof(args))) {
2483 r = -EFAULT;
2484 break;
2485 }
2486 break;
2487 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002488 default:
Avi Kivity367e1312009-08-26 14:57:07 +03002489 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002490 }
2491
2492 return r;
2493}
2494
Tony Krowiak45c9b472015-01-13 11:33:26 -05002495static int kvm_s390_apxa_installed(void)
2496{
Tony Krowiake585b242018-09-25 19:16:18 -04002497 struct ap_config_info info;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002498
Tony Krowiake585b242018-09-25 19:16:18 -04002499 if (ap_instructions_available()) {
2500 if (ap_qci(&info) == 0)
2501 return info.apxa;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002502 }
2503
2504 return 0;
2505}
2506
Tony Krowiake585b242018-09-25 19:16:18 -04002507/*
2508 * The format of the crypto control block (CRYCB) is specified in the 3 low
2509 * order bits of the CRYCB designation (CRYCBD) field as follows:
2510 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
2511 * AP extended addressing (APXA) facility is installed.
2512 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
2513 * Format 2: Both the APXA and MSAX3 facilities are installed.
2514 */
Tony Krowiak45c9b472015-01-13 11:33:26 -05002515static void kvm_s390_set_crycb_format(struct kvm *kvm)
2516{
2517 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2518
Tony Krowiake585b242018-09-25 19:16:18 -04002519 /* Clear the CRYCB format bits - i.e., set format 0 by default */
2520 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2521
2522 /* Check whether MSAX3 is installed */
2523 if (!test_kvm_facility(kvm, 76))
2524 return;
2525
Tony Krowiak45c9b472015-01-13 11:33:26 -05002526 if (kvm_s390_apxa_installed())
2527 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2528 else
2529 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2530}
2531
Pierre Morel0e237e42018-10-05 10:31:09 +02002532void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
2533 unsigned long *aqm, unsigned long *adm)
2534{
2535 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2536
2537 mutex_lock(&kvm->lock);
2538 kvm_s390_vcpu_block_all(kvm);
2539
2540 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2541 case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
2542 memcpy(crycb->apcb1.apm, apm, 32);
2543 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
2544 apm[0], apm[1], apm[2], apm[3]);
2545 memcpy(crycb->apcb1.aqm, aqm, 32);
2546 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
2547 aqm[0], aqm[1], aqm[2], aqm[3]);
2548 memcpy(crycb->apcb1.adm, adm, 32);
2549 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
2550 adm[0], adm[1], adm[2], adm[3]);
2551 break;
2552 case CRYCB_FORMAT1:
2553 case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
2554 memcpy(crycb->apcb0.apm, apm, 8);
2555 memcpy(crycb->apcb0.aqm, aqm, 2);
2556 memcpy(crycb->apcb0.adm, adm, 2);
2557 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
2558 apm[0], *((unsigned short *)aqm),
2559 *((unsigned short *)adm));
2560 break;
2561 default: /* Cannot happen */
2562 break;
2563 }
2564
2565 /* recreate the shadow crycb for each vcpu */
2566 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2567 kvm_s390_vcpu_unblock_all(kvm);
2568 mutex_unlock(&kvm->lock);
2569}
2570EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
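
/*
 * Explanatory note (not part of the original source): this export is
 * consumed by the vfio_ap mediated device driver, which calls
 * kvm_arch_crypto_set_masks() when AP adapters, domains and control domains
 * are assigned to a guest, so that the CRYCB (and the shadow CRYCB of any
 * vSIE guest) reflects the mediated device's AP matrix.
 */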
2571
Tony Krowiak421045982018-09-25 19:16:25 -04002572void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2573{
2574 mutex_lock(&kvm->lock);
2575 kvm_s390_vcpu_block_all(kvm);
2576
2577 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2578 sizeof(kvm->arch.crypto.crycb->apcb0));
2579 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2580 sizeof(kvm->arch.crypto.crycb->apcb1));
2581
Pierre Morel0e237e42018-10-05 10:31:09 +02002582 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
Pierre Morel6cc571b2018-09-25 19:16:30 -04002583 /* recreate the shadow crycb for each vcpu */
2584 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
Tony Krowiak421045982018-09-25 19:16:25 -04002585 kvm_s390_vcpu_unblock_all(kvm);
2586 mutex_unlock(&kvm->lock);
2587}
2588EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2589
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002590static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002591{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002592 struct cpuid cpuid;
2593
2594 get_cpu_id(&cpuid);
2595 cpuid.version = 0xff;
2596 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002597}
2598
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002599static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002600{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002601 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002602 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002603
Tony Krowiake585b242018-09-25 19:16:18 -04002604 if (!test_kvm_facility(kvm, 76))
2605 return;
2606
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002607 /* Enable AES/DEA protected key functions by default */
2608 kvm->arch.crypto.aes_kw = 1;
2609 kvm->arch.crypto.dea_kw = 1;
2610 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2611 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2612 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2613 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002614}
2615
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002616static void sca_dispose(struct kvm *kvm)
2617{
2618 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002619 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002620 else
2621 free_page((unsigned long)(kvm->arch.sca));
2622 kvm->arch.sca = NULL;
2623}
2624
Carsten Ottee08b9632012-01-04 10:25:20 +01002625int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002626{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002627 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002628 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002629 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002630 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002631
Carsten Ottee08b9632012-01-04 10:25:20 +01002632 rc = -EINVAL;
2633#ifdef CONFIG_KVM_S390_UCONTROL
2634 if (type & ~KVM_VM_S390_UCONTROL)
2635 goto out_err;
2636 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2637 goto out_err;
2638#else
2639 if (type)
2640 goto out_err;
2641#endif
2642
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002643 rc = s390_enable_sie();
2644 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002645 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002646
Carsten Otteb2904112011-10-18 12:27:13 +02002647 rc = -ENOMEM;
2648
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002649 if (!sclp.has_64bscao)
2650 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002651 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002652 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002653 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002654 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002655 goto out_err;
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002656 mutex_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002657 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002658 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002659 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002660 kvm->arch.sca = (struct bsca_block *)
2661 ((char *) kvm->arch.sca + sca_offset);
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002662 mutex_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002663
2664 sprintf(debug_name, "kvm-%u", current->pid);
2665
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002666 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002667 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002668 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002669
Michael Mueller19114be2017-05-30 14:26:02 +02002670 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002671 kvm->arch.sie_page2 =
2672 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2673 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002674 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002675
Michael Mueller25c84db2019-01-31 09:52:41 +01002676 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002677 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002678
2679 for (i = 0; i < kvm_s390_fac_size(); i++) {
2680 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2681 (kvm_s390_fac_base[i] |
2682 kvm_s390_fac_ext[i]);
2683 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2684 kvm_s390_fac_base[i];
2685 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002686 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002687
David Hildenbrand19352222017-08-29 16:31:08 +02002688	/* we are always in czam mode - even on pre-z14 machines */
2689 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2690 set_kvm_facility(kvm->arch.model.fac_list, 138);
2691 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002692 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2693 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002694 if (MACHINE_HAS_TLB_GUEST) {
2695 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2696 set_kvm_facility(kvm->arch.model.fac_list, 147);
2697 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002698
Pierre Morel05f31e32019-05-21 17:34:37 +02002699 if (css_general_characteristics.aiv && test_facility(65))
2700 set_kvm_facility(kvm->arch.model.fac_mask, 65);
2701
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002702 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002703 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002704
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002705 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002706
Fei Li51978392017-02-17 17:06:26 +08002707 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002708 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002709 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2710 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002711 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002712 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002713
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002714 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002715 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002716
Carsten Ottee08b9632012-01-04 10:25:20 +01002717 if (type & KVM_VM_S390_UCONTROL) {
2718 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002719 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002720 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002721 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002722 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002723 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002724 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002725 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002726 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002727 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002728 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002729 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002730 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002731 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002732
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002733 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002734 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002735 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002736 kvm_s390_vsie_init(kvm);
Michael Muellercc674ef2020-02-27 10:10:31 +01002737 if (use_gisa)
2738 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002739 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002740
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002741 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002742out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002743 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002744 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002745 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002746 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002747 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002748}
2749
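/*
 * Illustrative sketch (editorial addition, not part of this file):
 * kvm_arch_init_vm() above staggers each VM's basic SCA by 16 bytes within
 * its zeroed page, wrapping once a full bsca_block would no longer fit -
 * presumably so the hot SCA fields of different VMs land on different
 * cache lines.  my_* names are hypothetical.
 */
#include <stddef.h>

static size_t my_sca_offset;	/* protected by a lock, like kvm_lock above */

static void *my_place_bsca(void *page, size_t bsca_size, size_t page_size)
{
	my_sca_offset += 16;
	if (my_sca_offset + bsca_size > page_size)
		my_sca_offset = 0;
	return (char *)page + my_sca_offset;
}
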
Christian Borntraegerd329c032008-11-26 14:50:27 +01002750void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2751{
Janosch Frank29b40f12019-09-30 04:19:18 -04002752 u16 rc, rrc;
2753
Christian Borntraegerd329c032008-11-26 14:50:27 +01002754 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002755 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002756 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002757 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002758 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002759 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002760
2761 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002762 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002763
Dominik Dingele6db1d62015-05-07 15:41:57 +02002764 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002765 kvm_s390_vcpu_unsetup_cmma(vcpu);
Janosch Frank29b40f12019-09-30 04:19:18 -04002766	/* We cannot hold the vcpu mutex here; we are already dying */
2767 if (kvm_s390_pv_cpu_get_handle(vcpu))
2768 kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002769 free_page((unsigned long)(vcpu->arch.sie_block));
Christian Borntraegerd329c032008-11-26 14:50:27 +01002770}
2771
2772static void kvm_free_vcpus(struct kvm *kvm)
2773{
2774 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002775 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002776
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002777 kvm_for_each_vcpu(i, vcpu, kvm)
Sean Christopherson4543bdc2019-12-18 13:55:14 -08002778 kvm_vcpu_destroy(vcpu);
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002779
2780 mutex_lock(&kvm->lock);
2781 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2782 kvm->vcpus[i] = NULL;
2783
2784 atomic_set(&kvm->online_vcpus, 0);
2785 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002786}
2787
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002788void kvm_arch_destroy_vm(struct kvm *kvm)
2789{
Janosch Frank29b40f12019-09-30 04:19:18 -04002790 u16 rc, rrc;
2791
Christian Borntraegerd329c032008-11-26 14:50:27 +01002792 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002793 sca_dispose(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002794 kvm_s390_gisa_destroy(kvm);
Janosch Frank29b40f12019-09-30 04:19:18 -04002795 /*
2796 * We are already at the end of life and kvm->lock is not taken.
2797 * This is ok as the file descriptor is closed by now and nobody
2798 * can mess with the pv state. To avoid lockdep_assert_held from
2799 * complaining we do not use kvm_s390_pv_is_protected.
2800 */
2801 if (kvm_s390_pv_get_handle(kvm))
2802 kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
2803 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002804 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002805 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002806 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002807 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002808 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002809 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002810 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002811}
2812
2813/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002814static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2815{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002816 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002817 if (!vcpu->arch.gmap)
2818 return -ENOMEM;
2819 vcpu->arch.gmap->private = vcpu->kvm;
2820
2821 return 0;
2822}
2823
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002824static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2825{
David Hildenbranda6940672016-08-08 22:39:32 +02002826 if (!kvm_s390_use_sca_entries())
2827 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002828 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002829 if (vcpu->kvm->arch.use_esca) {
2830 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002831
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002832 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002833 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002834 } else {
2835 struct bsca_block *sca = vcpu->kvm->arch.sca;
2836
2837 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002838 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002839 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002840 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002841}
2842
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002843static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002844{
David Hildenbranda6940672016-08-08 22:39:32 +02002845 if (!kvm_s390_use_sca_entries()) {
2846 struct bsca_block *sca = vcpu->kvm->arch.sca;
2847
2848 /* we still need the basic sca for the ipte control */
2849 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2850 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002851 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002852 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002853 read_lock(&vcpu->kvm->arch.sca_lock);
2854 if (vcpu->kvm->arch.use_esca) {
2855 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002856
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002857 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002858 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2859 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002860 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002861 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002862 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002863 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002864
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002865 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002866 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2867 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002868 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002869 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002870 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002871}
2872
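/*
 * Illustrative sketch (editorial addition, not part of this file): the SIE
 * block stores the SCA origin as two 32-bit halves, scaoh/scaol, as seen in
 * sca_add_vcpu() above.  For the extended SCA the low six bits of scaol are
 * masked off, so the block must be 64-byte aligned.  my_* is hypothetical.
 */
#include <stdint.h>

static void my_set_sca_origin(uint32_t *scaoh, uint32_t *scaol,
			      void *sca, int use_esca)
{
	uint64_t origin = (uint64_t)(uintptr_t)sca;

	*scaoh = (uint32_t)(origin >> 32);
	*scaol = use_esca ? ((uint32_t)origin & ~0x3fU) : (uint32_t)origin;
}
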
2873/* Basic SCA to Extended SCA data copy routines */
2874static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2875{
2876 d->sda = s->sda;
2877 d->sigp_ctrl.c = s->sigp_ctrl.c;
2878 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2879}
2880
2881static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2882{
2883 int i;
2884
2885 d->ipte_control = s->ipte_control;
2886 d->mcn[0] = s->mcn;
2887 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2888 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2889}
2890
2891static int sca_switch_to_extended(struct kvm *kvm)
2892{
2893 struct bsca_block *old_sca = kvm->arch.sca;
2894 struct esca_block *new_sca;
2895 struct kvm_vcpu *vcpu;
2896 unsigned int vcpu_idx;
2897 u32 scaol, scaoh;
2898
Janosch Frank29b40f12019-09-30 04:19:18 -04002899 if (kvm->arch.use_esca)
2900 return 0;
2901
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002902 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2903 if (!new_sca)
2904 return -ENOMEM;
2905
2906 scaoh = (u32)((u64)(new_sca) >> 32);
2907 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2908
2909 kvm_s390_vcpu_block_all(kvm);
2910 write_lock(&kvm->arch.sca_lock);
2911
2912 sca_copy_b_to_e(new_sca, old_sca);
2913
2914 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2915 vcpu->arch.sie_block->scaoh = scaoh;
2916 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002917 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002918 }
2919 kvm->arch.sca = new_sca;
2920 kvm->arch.use_esca = 1;
2921
2922 write_unlock(&kvm->arch.sca_lock);
2923 kvm_s390_vcpu_unblock_all(kvm);
2924
2925 free_page((unsigned long)old_sca);
2926
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002927 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2928 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002929 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002930}
2931
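/*
 * Illustrative sketch (editorial addition, not part of this file):
 * sca_switch_to_extended() above follows a replace-while-quiesced pattern -
 * stop all vcpus from entering SIE, publish the new structure under the
 * write lock, then resume; readers such as sca_add_vcpu() only ever take
 * the read side.  Compilable approximation with pthreads; the quiesce
 * helpers are stubs for kvm_s390_vcpu_block_all/unblock_all, and all my_*
 * names are hypothetical.
 */
#include <pthread.h>
#include <stdlib.h>

struct my_sca { int use_esca; };

static pthread_rwlock_t my_sca_lock = PTHREAD_RWLOCK_INITIALIZER;

static void my_quiesce(void) { }	/* no vcpu may enter SIE after this */
static void my_resume(void) { }

static void my_switch(struct my_sca **current_sca, struct my_sca *bigger)
{
	struct my_sca *old;

	my_quiesce();
	pthread_rwlock_wrlock(&my_sca_lock);
	old = *current_sca;
	/* ... copy entries old -> bigger, repoint every vcpu ... */
	bigger->use_esca = 1;
	*current_sca = bigger;		/* publish */
	pthread_rwlock_unlock(&my_sca_lock);
	my_resume();
	free(old);
}
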
2932static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2933{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002934 int rc;
2935
David Hildenbranda6940672016-08-08 22:39:32 +02002936 if (!kvm_s390_use_sca_entries()) {
2937 if (id < KVM_MAX_VCPUS)
2938 return true;
2939 return false;
2940 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002941 if (id < KVM_S390_BSCA_CPU_SLOTS)
2942 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002943 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002944 return false;
2945
2946 mutex_lock(&kvm->lock);
2947 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2948 mutex_unlock(&kvm->lock);
2949
2950 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002951}
2952
David Hildenbranddb0758b2016-02-15 09:42:25 +01002953/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2954static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2955{
2956 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002957 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002958 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002959 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002960}
2961
2962/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2963static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2964{
2965 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002966 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002967 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2968 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002969 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002970}
2971
2972/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2973static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2974{
2975 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2976 vcpu->arch.cputm_enabled = true;
2977 __start_cpu_timer_accounting(vcpu);
2978}
2979
2980/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2981static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2982{
2983 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2984 __stop_cpu_timer_accounting(vcpu);
2985 vcpu->arch.cputm_enabled = false;
2986}
2987
2988static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2989{
2990 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2991 __enable_cpu_timer_accounting(vcpu);
2992 preempt_enable();
2993}
2994
2995static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2996{
2997 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2998 __disable_cpu_timer_accounting(vcpu);
2999 preempt_enable();
3000}
3001
David Hildenbrand4287f242016-02-15 09:40:12 +01003002/* set the cpu timer - may only be called from the VCPU thread itself */
3003void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3004{
David Hildenbranddb0758b2016-02-15 09:42:25 +01003005 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01003006 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003007 if (vcpu->arch.cputm_enabled)
3008 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01003009 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003010 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003011 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01003012}
3013
David Hildenbranddb0758b2016-02-15 09:42:25 +01003014/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01003015__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3016{
David Hildenbrand9c23a132016-02-17 21:53:33 +01003017 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01003018 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01003019
3020 if (unlikely(!vcpu->arch.cputm_enabled))
3021 return vcpu->arch.sie_block->cputm;
3022
David Hildenbrand9c23a132016-02-17 21:53:33 +01003023 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3024 do {
3025 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3026 /*
3027 * If the writer would ever execute a read in the critical
3028 * section, e.g. in irq context, we have a deadlock.
3029 */
3030 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3031 value = vcpu->arch.sie_block->cputm;
3032 /* if cputm_start is 0, accounting is being started/stopped */
3033 if (likely(vcpu->arch.cputm_start))
3034 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3035 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3036 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003037 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01003038}
3039
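/*
 * Illustrative sketch (editorial addition, not part of this file): the
 * reader loop in kvm_s390_get_cpu_timer() above is the classic seqcount
 * pattern, except that the raw read does not spin waiting for a writer
 * (which would deadlock if the caller itself were the writer, hence the
 * WARN above); instead, clearing the low sequence bit in the retry check
 * forces a retry for any read that overlapped an update.  Simplified C11
 * approximation; my_* names are hypothetical and the unguarded 64-bit
 * read is tolerated because the retry discards torn values.
 */
#include <stdatomic.h>
#include <stdint.h>

struct my_seq_val {
	_Atomic unsigned int seq;	/* odd while an update is in flight */
	uint64_t val;
};

static uint64_t my_read(struct my_seq_val *s)
{
	unsigned int seq;
	uint64_t v;

	do {
		seq = atomic_load_explicit(&s->seq, memory_order_acquire);
		v = s->val;		/* possibly torn; validated below */
	} while (atomic_load_explicit(&s->seq, memory_order_acquire) !=
		 (seq & ~1u));		/* retry if we overlapped a write */
	return v;
}
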
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003040void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3041{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003042
David Hildenbrand37d9df92015-03-11 16:47:33 +01003043 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003044 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01003045 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003046 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01003047 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003048}
3049
3050void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3051{
David Hildenbrand01a745a2016-02-12 20:41:56 +01003052 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01003053 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003054 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003055 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01003056 vcpu->arch.enabled_gmap = gmap_get_enabled();
3057 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003058
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003059}
3060
Dominik Dingel31928aa2014-12-04 15:47:07 +01003061void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003062{
Jason J. Herne72f25022014-11-25 09:46:02 -05003063 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02003064 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003065 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01003066 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02003067 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003068 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02003069 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01003070 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02003071 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02003072 }
David Hildenbrand6502a342016-06-21 14:19:51 +02003073 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3074 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01003075 /* make vcpu_load load the right gmap on the first trigger */
3076 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003077}
3078
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003079static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3080{
3081 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3082 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3083 return true;
3084 return false;
3085}
3086
3087static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3088{
3089 /* At least one ECC subfunction must be present */
3090 return kvm_has_pckmo_subfunc(kvm, 32) ||
3091 kvm_has_pckmo_subfunc(kvm, 33) ||
3092 kvm_has_pckmo_subfunc(kvm, 34) ||
3093 kvm_has_pckmo_subfunc(kvm, 40) ||
3094 kvm_has_pckmo_subfunc(kvm, 41);
3095
3096}
3097
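/*
 * Illustrative sketch (editorial addition, not part of this file): facility
 * and subfunction bits on s390 are numbered MSB-first, so
 * kvm_has_pckmo_subfunc() above uses test_bit_inv(), which flips the bit
 * number within each long.  Simplified 64-bit-word version; my_* is
 * hypothetical.
 */
#include <stdint.h>

static int my_test_bit_inv(unsigned int nr, const uint64_t *addr)
{
	return (addr[nr / 64] >> (63 - (nr % 64))) & 1;
}
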
Tony Krowiak5102ee82014-06-27 14:46:01 -04003098static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3099{
Tony Krowiake585b242018-09-25 19:16:18 -04003100 /*
3101 * If the AP instructions are not being interpreted and the MSAX3
3102 * facility is not configured for the guest, there is nothing to set up.
3103 */
3104 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04003105 return;
3106
Tony Krowiake585b242018-09-25 19:16:18 -04003107 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02003108 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04003109 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003110 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
Tony Krowiaka374e892014-09-03 10:13:53 +02003111
Tony Krowiake585b242018-09-25 19:16:18 -04003112 if (vcpu->kvm->arch.crypto.apie)
3113 vcpu->arch.sie_block->eca |= ECA_APIE;
3114
3115 /* Set up protected key support */
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003116 if (vcpu->kvm->arch.crypto.aes_kw) {
Tony Krowiaka374e892014-09-03 10:13:53 +02003117 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003118		/* ecc is also wrapped with the AES key */
3119 if (kvm_has_pckmo_ecc(vcpu->kvm))
3120 vcpu->arch.sie_block->ecd |= ECD_ECC;
3121 }
3122
Tony Krowiaka374e892014-09-03 10:13:53 +02003123 if (vcpu->kvm->arch.crypto.dea_kw)
3124 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04003125}
3126
Dominik Dingelb31605c2014-03-25 13:47:11 +01003127void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3128{
3129 free_page(vcpu->arch.sie_block->cbrlo);
3130 vcpu->arch.sie_block->cbrlo = 0;
3131}
3132
3133int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3134{
3135 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
3136 if (!vcpu->arch.sie_block->cbrlo)
3137 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01003138 return 0;
3139}
3140
Michael Mueller91520f12015-02-27 14:32:11 +01003141static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3142{
3143 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3144
Michael Mueller91520f12015-02-27 14:32:11 +01003145 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01003146 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01003147 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01003148}
3149
Sean Christophersonff72bb52019-12-18 13:55:20 -08003150static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3151{
Dominik Dingelb31605c2014-03-25 13:47:11 +01003152 int rc = 0;
Janosch Frank29b40f12019-09-30 04:19:18 -04003153 u16 uvrc, uvrrc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003154
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01003155 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3156 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003157 CPUSTAT_STOPPED);
3158
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003159 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003160 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003161 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003162 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003163
Michael Mueller91520f12015-02-27 14:32:11 +01003164 kvm_s390_vcpu_setup_model(vcpu);
3165
David Hildenbrandbdab09f2016-04-12 11:07:49 +02003166 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3167 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003168 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01003169 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003170 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02003171 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003172 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003173
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003174 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003175 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02003176 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003177 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3178 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02003179 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003180 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02003181 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003182 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003183 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003184 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003185 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003186 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01003187 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003188 vcpu->arch.sie_block->eca |= ECA_VX;
3189 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04003190 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003191 if (test_kvm_facility(vcpu->kvm, 139))
3192 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003193 if (test_kvm_facility(vcpu->kvm, 156))
3194 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02003195 if (vcpu->arch.sie_block->gd) {
3196 vcpu->arch.sie_block->eca |= ECA_AIV;
3197 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3198 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3199 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003200 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3201 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08003202 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05003203
3204 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003205 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05003206 else
3207 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05003208
Dominik Dingele6db1d62015-05-07 15:41:57 +02003209 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01003210 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3211 if (rc)
3212 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003213 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01003214 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02003215 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01003216
Collin Walling67d49d52018-08-31 12:51:19 -04003217 vcpu->arch.sie_block->hpid = HPID_KVM;
3218
Tony Krowiak5102ee82014-06-27 14:46:01 -04003219 kvm_s390_vcpu_crypto_setup(vcpu);
3220
Janosch Frank29b40f12019-09-30 04:19:18 -04003221 mutex_lock(&vcpu->kvm->lock);
3222 if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3223 rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3224 if (rc)
3225 kvm_s390_vcpu_unsetup_cmma(vcpu);
3226 }
3227 mutex_unlock(&vcpu->kvm->lock);
3228
Dominik Dingelb31605c2014-03-25 13:47:11 +01003229 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003230}
3231
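/*
 * Illustrative sketch (editorial addition, not part of this file): the many
 * test_kvm_facility() checks in kvm_s390_vcpu_setup() above gate guest
 * features on a facility being both available on the host (fac_mask) and
 * enabled for this VM's CPU model (fac_list); the helper is, to the best
 * of this editor's knowledge, the logical AND of the two bitmaps.
 * Simplified MSB-first version; my_* names are hypothetical.
 */
#include <stdint.h>

static int my_test_facility(unsigned int nr, const uint64_t *facs)
{
	return (facs[nr / 64] >> (63 - (nr % 64))) & 1;
}

static int my_test_kvm_facility(const uint64_t *fac_mask,
				const uint64_t *fac_list, unsigned int nr)
{
	return my_test_facility(nr, fac_mask) &&
	       my_test_facility(nr, fac_list);
}
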
Sean Christopherson897cc382019-12-18 13:55:09 -08003232int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3233{
3234 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3235 return -EINVAL;
3236 return 0;
3237}
3238
Sean Christophersone529ef62019-12-18 13:55:15 -08003239int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003240{
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003241 struct sie_page *sie_page;
Sean Christopherson897cc382019-12-18 13:55:09 -08003242 int rc;
Carsten Otte4d475552011-10-18 12:27:12 +02003243
QingFeng Haoda72ca42017-06-07 11:41:19 +02003244 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003245 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
3246 if (!sie_page)
Sean Christophersone529ef62019-12-18 13:55:15 -08003247 return -ENOMEM;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003248
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003249 vcpu->arch.sie_block = &sie_page->sie_block;
3250 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3251
David Hildenbrandefed1102015-04-16 12:32:41 +02003252 /* the real guest size will always be smaller than msl */
3253 vcpu->arch.sie_block->mso = 0;
3254 vcpu->arch.sie_block->msl = sclp.hamax;
3255
Sean Christophersone529ef62019-12-18 13:55:15 -08003256 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003257 spin_lock_init(&vcpu->arch.local_int.lock);
Sean Christophersone529ef62019-12-18 13:55:15 -08003258 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02003259 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3260 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003261 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01003262
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003263 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3264 kvm_clear_async_pf_completion_queue(vcpu);
3265 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3266 KVM_SYNC_GPRS |
3267 KVM_SYNC_ACRS |
3268 KVM_SYNC_CRS |
3269 KVM_SYNC_ARCH0 |
3270 KVM_SYNC_PFAULT;
3271 kvm_s390_set_prefix(vcpu, 0);
3272 if (test_kvm_facility(vcpu->kvm, 64))
3273 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3274 if (test_kvm_facility(vcpu->kvm, 82))
3275 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3276 if (test_kvm_facility(vcpu->kvm, 133))
3277 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3278 if (test_kvm_facility(vcpu->kvm, 156))
3279 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3280 /* fprs can be synchronized via vrs, even if the guest has no vx. With
3281 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3282 */
3283 if (MACHINE_HAS_VX)
3284 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3285 else
3286 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3287
3288 if (kvm_is_ucontrol(vcpu->kvm)) {
3289 rc = __kvm_ucontrol_vcpu_init(vcpu);
3290 if (rc)
Sean Christophersona2017f12019-12-18 13:55:11 -08003291 goto out_free_sie_block;
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003292 }
3293
Sean Christophersone529ef62019-12-18 13:55:15 -08003294 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3295 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3296 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003297
Sean Christophersonff72bb52019-12-18 13:55:20 -08003298 rc = kvm_s390_vcpu_setup(vcpu);
3299 if (rc)
3300 goto out_ucontrol_uninit;
Sean Christophersone529ef62019-12-18 13:55:15 -08003301 return 0;
3302
Sean Christophersonff72bb52019-12-18 13:55:20 -08003303out_ucontrol_uninit:
3304 if (kvm_is_ucontrol(vcpu->kvm))
3305 gmap_remove(vcpu->arch.gmap);
Wei Yongjun7b06bf22010-03-09 14:37:53 +08003306out_free_sie_block:
3307 free_page((unsigned long)(vcpu->arch.sie_block));
Sean Christophersone529ef62019-12-18 13:55:15 -08003308 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003309}
3310
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003311int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3312{
David Hildenbrand9a022062014-08-05 17:40:47 +02003313 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003314}
3315
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003316bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3317{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003318 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003319}
3320
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003321void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003322{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003323 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003324 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003325}
3326
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003327void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003328{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003329 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003330}
3331
Christian Borntraeger8e236542015-04-09 13:49:04 +02003332static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3333{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003334 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003335 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003336}
3337
David Hildenbrand9ea59722018-09-25 19:16:16 -04003338bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3339{
3340 return atomic_read(&vcpu->arch.sie_block->prog20) &
3341 (PROG_BLOCK_SIE | PROG_REQUEST);
3342}
3343
Christian Borntraeger8e236542015-04-09 13:49:04 +02003344static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3345{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003346 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003347}
3348
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003349/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003350 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003351 * If the CPU is not running (e.g., waiting while idle), the function
 3352 * returns immediately. */
3353void exit_sie(struct kvm_vcpu *vcpu)
3354{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003355 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003356 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003357 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3358 cpu_relax();
3359}
3360
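/*
 * Illustrative sketch (editorial addition, not part of this file):
 * exit_sie() above is a kick-and-wait - raise a flag the hardware honours
 * on SIE entry/exit, then spin until the target has actually left SIE.
 * Compilable approximation with C11 atomics; my_/MY_ names are
 * hypothetical stand-ins for CPUSTAT_STOP_INT and PROG_IN_SIE.
 */
#include <stdatomic.h>

#define MY_STOP_INT 0x01u

struct my_vcpu {
	_Atomic unsigned int flags;	/* MY_STOP_INT requests an exit */
	_Atomic unsigned int in_sie;	/* set by the vcpu while in SIE */
};

static void my_exit_sie(struct my_vcpu *v)
{
	atomic_fetch_or(&v->flags, MY_STOP_INT);
	while (atomic_load(&v->in_sie))
		;			/* cpu_relax() in the real code */
}
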
Christian Borntraeger8e236542015-04-09 13:49:04 +02003361/* Kick a guest cpu out of SIE to process a request synchronously */
3362void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003363{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003364 kvm_make_request(req, vcpu);
3365 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003366}
3367
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003368static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3369 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003370{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003371 struct kvm *kvm = gmap->private;
3372 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003373 unsigned long prefix;
3374 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003375
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02003376 if (gmap_is_shadow(gmap))
3377 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003378 if (start >= 1UL << 31)
3379 /* We are only interested in prefix pages */
3380 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003381 kvm_for_each_vcpu(i, vcpu, kvm) {
3382 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003383 prefix = kvm_s390_get_prefix(vcpu);
3384 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3385 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3386 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003387 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003388 }
3389 }
3390}
3391
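/*
 * Illustrative sketch (editorial addition, not part of this file): the
 * prefix test in kvm_gmap_notifier() above is a standard closed-interval
 * overlap check between [start, end] and the two-page prefix area
 * [prefix, prefix + 2*PAGE_SIZE - 1].  Generic form; my_* is hypothetical.
 */
static int my_ranges_overlap(unsigned long a_lo, unsigned long a_hi,
			     unsigned long b_lo, unsigned long b_hi)
{
	return a_lo <= b_hi && b_lo <= a_hi;	/* both ranges inclusive */
}
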
Christian Borntraeger8b905d22019-03-05 05:30:02 -05003392bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3393{
3394 /* do not poll with more than halt_poll_max_steal percent of steal time */
3395 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3396 halt_poll_max_steal) {
3397 vcpu->stat.halt_no_poll_steal++;
3398 return true;
3399 }
3400 return false;
3401}
3402
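/*
 * Illustrative sketch (editorial addition, not part of this file): the
 * comparison in kvm_arch_no_poll() above computes steal time as a
 * percentage of one accounting tick.  This assumes avg_steal_timer is kept
 * in CPU-timer units (4096 units per microsecond, hence the << 12) and
 * TICK_USEC is the tick length in microseconds; my_* is hypothetical.
 */
#include <stdint.h>

static int my_steal_percent(uint64_t avg_steal_timer, uint64_t tick_usec)
{
	return (int)(avg_steal_timer * 100 / (tick_usec << 12));
}
/* poll only while my_steal_percent(...) < halt_poll_max_steal */
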
Christoffer Dallb6d33832012-03-08 16:44:24 -05003403int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3404{
3405 /* kvm common code refers to this, but never calls it */
3406 BUG();
3407 return 0;
3408}
3409
Carsten Otte14eebd92012-05-15 14:15:26 +02003410static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3411 struct kvm_one_reg *reg)
3412{
3413 int r = -EINVAL;
3414
3415 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003416 case KVM_REG_S390_TODPR:
3417 r = put_user(vcpu->arch.sie_block->todpr,
3418 (u32 __user *)reg->addr);
3419 break;
3420 case KVM_REG_S390_EPOCHDIFF:
3421 r = put_user(vcpu->arch.sie_block->epoch,
3422 (u64 __user *)reg->addr);
3423 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003424 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003425 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02003426 (u64 __user *)reg->addr);
3427 break;
3428 case KVM_REG_S390_CLOCK_COMP:
3429 r = put_user(vcpu->arch.sie_block->ckc,
3430 (u64 __user *)reg->addr);
3431 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003432 case KVM_REG_S390_PFTOKEN:
3433 r = put_user(vcpu->arch.pfault_token,
3434 (u64 __user *)reg->addr);
3435 break;
3436 case KVM_REG_S390_PFCOMPARE:
3437 r = put_user(vcpu->arch.pfault_compare,
3438 (u64 __user *)reg->addr);
3439 break;
3440 case KVM_REG_S390_PFSELECT:
3441 r = put_user(vcpu->arch.pfault_select,
3442 (u64 __user *)reg->addr);
3443 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003444 case KVM_REG_S390_PP:
3445 r = put_user(vcpu->arch.sie_block->pp,
3446 (u64 __user *)reg->addr);
3447 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003448 case KVM_REG_S390_GBEA:
3449 r = put_user(vcpu->arch.sie_block->gbea,
3450 (u64 __user *)reg->addr);
3451 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003452 default:
3453 break;
3454 }
3455
3456 return r;
3457}
3458
3459static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3460 struct kvm_one_reg *reg)
3461{
3462 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01003463 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02003464
3465 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003466 case KVM_REG_S390_TODPR:
3467 r = get_user(vcpu->arch.sie_block->todpr,
3468 (u32 __user *)reg->addr);
3469 break;
3470 case KVM_REG_S390_EPOCHDIFF:
3471 r = get_user(vcpu->arch.sie_block->epoch,
3472 (u64 __user *)reg->addr);
3473 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003474 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003475 r = get_user(val, (u64 __user *)reg->addr);
3476 if (!r)
3477 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02003478 break;
3479 case KVM_REG_S390_CLOCK_COMP:
3480 r = get_user(vcpu->arch.sie_block->ckc,
3481 (u64 __user *)reg->addr);
3482 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003483 case KVM_REG_S390_PFTOKEN:
3484 r = get_user(vcpu->arch.pfault_token,
3485 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003486 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3487 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02003488 break;
3489 case KVM_REG_S390_PFCOMPARE:
3490 r = get_user(vcpu->arch.pfault_compare,
3491 (u64 __user *)reg->addr);
3492 break;
3493 case KVM_REG_S390_PFSELECT:
3494 r = get_user(vcpu->arch.pfault_select,
3495 (u64 __user *)reg->addr);
3496 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003497 case KVM_REG_S390_PP:
3498 r = get_user(vcpu->arch.sie_block->pp,
3499 (u64 __user *)reg->addr);
3500 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003501 case KVM_REG_S390_GBEA:
3502 r = get_user(vcpu->arch.sie_block->gbea,
3503 (u64 __user *)reg->addr);
3504 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003505 default:
3506 break;
3507 }
3508
3509 return r;
3510}
Christoffer Dallb6d33832012-03-08 16:44:24 -05003511
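/*
 * Illustrative sketch (editorial addition, not part of this file): the
 * get/set_one_reg handlers above back the generic KVM_GET_ONE_REG and
 * KVM_SET_ONE_REG vcpu ioctls.  Hypothetical userspace usage reading the
 * CPU timer (error handling omitted); the ioctl and register id are part
 * of the KVM uapi.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static uint64_t my_read_cpu_timer(int vcpu_fd)
{
	uint64_t val = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (uint64_t)(uintptr_t)&val,
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* returns 0 on success */
	return val;
}
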
Janosch Frank7de3f142020-01-31 05:02:02 -05003512static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003513{
Janosch Frank7de3f142020-01-31 05:02:02 -05003514 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
3515 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3516 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
3517
3518 kvm_clear_async_pf_completion_queue(vcpu);
3519 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
3520 kvm_s390_vcpu_stop(vcpu);
3521 kvm_s390_clear_local_irqs(vcpu);
3522}
3523
3524static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3525{
3526 /* Initial reset is a superset of the normal reset */
3527 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
3528
Christian Borntraegere93fc7b2020-03-03 03:10:57 -05003529 /*
 3530	 * This equals the initial cpu reset in the POP, but we don't switch to ESA.
3531 * We do not only reset the internal data, but also ...
3532 */
Janosch Frank7de3f142020-01-31 05:02:02 -05003533 vcpu->arch.sie_block->gpsw.mask = 0;
3534 vcpu->arch.sie_block->gpsw.addr = 0;
3535 kvm_s390_set_prefix(vcpu, 0);
3536 kvm_s390_set_cpu_timer(vcpu, 0);
3537 vcpu->arch.sie_block->ckc = 0;
Janosch Frank7de3f142020-01-31 05:02:02 -05003538 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
3539 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
3540 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
Christian Borntraegere93fc7b2020-03-03 03:10:57 -05003541
3542 /* ... the data in sync regs */
3543 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
3544 vcpu->run->s.regs.ckc = 0;
3545 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
3546 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
3547 vcpu->run->psw_addr = 0;
3548 vcpu->run->psw_mask = 0;
3549 vcpu->run->s.regs.todpr = 0;
3550 vcpu->run->s.regs.cputm = 0;
3551 vcpu->run->s.regs.ckc = 0;
3552 vcpu->run->s.regs.pp = 0;
3553 vcpu->run->s.regs.gbea = 1;
Janosch Frank7de3f142020-01-31 05:02:02 -05003554 vcpu->run->s.regs.fpc = 0;
Janosch Frank0f303502020-02-10 04:27:47 -05003555 /*
3556 * Do not reset these registers in the protected case, as some of
 3557	 * them are overlaid and they are not accessible in this case
3558 * anyway.
3559 */
3560 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
3561 vcpu->arch.sie_block->gbea = 1;
3562 vcpu->arch.sie_block->pp = 0;
3563 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3564 vcpu->arch.sie_block->todpr = 0;
3565 }
Janosch Frank7de3f142020-01-31 05:02:02 -05003566}
3567
3568static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
3569{
3570 struct kvm_sync_regs *regs = &vcpu->run->s.regs;
3571
3572 /* Clear reset is a superset of the initial reset */
3573 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3574
3575 memset(&regs->gprs, 0, sizeof(regs->gprs));
3576 memset(&regs->vrs, 0, sizeof(regs->vrs));
3577 memset(&regs->acrs, 0, sizeof(regs->acrs));
3578 memset(&regs->gscb, 0, sizeof(regs->gscb));
3579
3580 regs->etoken = 0;
3581 regs->etoken_extension = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003582}
3583
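/*
 * Illustrative sketch (editorial addition, not part of this file): the
 * three reset handlers above nest - clear reset is a superset of initial
 * reset, which is a superset of normal reset - and are reached from
 * userspace through vcpu ioctls of the same names.  Hypothetical usage
 * (error handling omitted); KVM_S390_NORMAL_RESET and KVM_S390_CLEAR_RESET
 * depend on the KVM_CAP_S390_VCPU_RESETS capability.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void my_reset_vcpu(int vcpu_fd)
{
	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, NULL);
	/* or: KVM_S390_NORMAL_RESET / KVM_S390_CLEAR_RESET */
}
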
3584int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3585{
Christoffer Dall875656f2017-12-04 21:35:27 +01003586 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003587 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Christoffer Dall875656f2017-12-04 21:35:27 +01003588 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003589 return 0;
3590}
3591
3592int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3593{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003594 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003595 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003596 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003597 return 0;
3598}
3599
3600int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3601 struct kvm_sregs *sregs)
3602{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003603 vcpu_load(vcpu);
3604
Christian Borntraeger59674c12012-01-11 11:20:33 +01003605 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003606 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003607
3608 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003609 return 0;
3610}
3611
3612int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3613 struct kvm_sregs *sregs)
3614{
Christoffer Dallbcdec412017-12-04 21:35:28 +01003615 vcpu_load(vcpu);
3616
Christian Borntraeger59674c12012-01-11 11:20:33 +01003617 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003618 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Christoffer Dallbcdec412017-12-04 21:35:28 +01003619
3620 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003621 return 0;
3622}
3623
3624int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3625{
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003626 int ret = 0;
3627
3628 vcpu_load(vcpu);
3629
3630 if (test_fp_ctl(fpu->fpc)) {
3631 ret = -EINVAL;
3632 goto out;
3633 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003634 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003635 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003636 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3637 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003638 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003639 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003640
3641out:
3642 vcpu_put(vcpu);
3643 return ret;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003644}
3645
3646int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3647{
Christoffer Dall13931232017-12-04 21:35:34 +01003648 vcpu_load(vcpu);
3649
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003650 /* make sure we have the latest values */
3651 save_fpu_regs();
3652 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003653 convert_vx_to_fp((freg_t *) fpu->fprs,
3654 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003655 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003656 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003657 fpu->fpc = vcpu->run->s.regs.fpc;
Christoffer Dall13931232017-12-04 21:35:34 +01003658
3659 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003660 return 0;
3661}
3662
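/*
 * Illustrative sketch (editorial addition, not part of this file): on
 * machines with the vector facility, each of the 16 floating-point
 * registers aliases the leftmost 64 bits of the corresponding 128-bit
 * vector register, which is why the fpu ioctls above convert via
 * convert_fp_to_vx()/convert_vx_to_fp().  Simplified conversion; my_*
 * names are hypothetical.
 */
#include <stdint.h>

struct my_vr { uint64_t high, low; };	/* one 128-bit vector register */

static void my_convert_fp_to_vx(struct my_vr *vrs, const uint64_t *fprs)
{
	for (int i = 0; i < 16; i++)
		vrs[i].high = fprs[i];	/* FPR i == bits 0-63 of VR i */
}
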
3663static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3664{
3665 int rc = 0;
3666
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003667 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003668 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003669 else {
3670 vcpu->run->psw_mask = psw.mask;
3671 vcpu->run->psw_addr = psw.addr;
3672 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003673 return rc;
3674}
3675
3676int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3677 struct kvm_translation *tr)
3678{
3679 return -EINVAL; /* not implemented yet */
3680}
3681
David Hildenbrand27291e22014-01-23 12:26:52 +01003682#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3683 KVM_GUESTDBG_USE_HW_BP | \
3684 KVM_GUESTDBG_ENABLE)
3685
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003686int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3687 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003688{
David Hildenbrand27291e22014-01-23 12:26:52 +01003689 int rc = 0;
3690
Christoffer Dall66b56562017-12-04 21:35:33 +01003691 vcpu_load(vcpu);
3692
David Hildenbrand27291e22014-01-23 12:26:52 +01003693 vcpu->guest_debug = 0;
3694 kvm_s390_clear_bp_data(vcpu);
3695
Christoffer Dall66b56562017-12-04 21:35:33 +01003696 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3697 rc = -EINVAL;
3698 goto out;
3699 }
3700 if (!sclp.has_gpere) {
3701 rc = -EINVAL;
3702 goto out;
3703 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003704
3705 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3706 vcpu->guest_debug = dbg->control;
3707 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003708 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003709
3710 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3711 rc = kvm_s390_import_bp_data(vcpu, dbg);
3712 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003713 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003714 vcpu->arch.guestdbg.last_bp = 0;
3715 }
3716
3717 if (rc) {
3718 vcpu->guest_debug = 0;
3719 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003720 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003721 }
3722
Christoffer Dall66b56562017-12-04 21:35:33 +01003723out:
3724 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003725 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003726}
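/*
 * Illustrative userspace sketch (not part of this file): enabling
 * single-stepping through the handler above. vcpu_fd is assumed to be an
 * open KVM vcpu fd; setting dbg.control to 0 disables debugging again.
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg))
 *		err(1, "KVM_SET_GUEST_DEBUG");
 */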
3727
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003728int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3729 struct kvm_mp_state *mp_state)
3730{
Christoffer Dallfd232562017-12-04 21:35:30 +01003731 int ret;
3732
3733 vcpu_load(vcpu);
3734
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003735 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003736 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3737 KVM_MP_STATE_OPERATING;
3738
3739 vcpu_put(vcpu);
3740 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003741}
3742
3743int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3744 struct kvm_mp_state *mp_state)
3745{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003746 int rc = 0;
3747
Christoffer Dalle83dff52017-12-04 21:35:31 +01003748 vcpu_load(vcpu);
3749
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003750 /* user space knows about this interface - let it control the state */
3751 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3752
3753 switch (mp_state->mp_state) {
3754 case KVM_MP_STATE_STOPPED:
Janosch Frankfe28c7862019-05-15 13:24:30 +02003755 rc = kvm_s390_vcpu_stop(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003756 break;
3757 case KVM_MP_STATE_OPERATING:
Janosch Frankfe28c7862019-05-15 13:24:30 +02003758 rc = kvm_s390_vcpu_start(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003759 break;
3760 case KVM_MP_STATE_LOAD:
Janosch Frank7c36a3f2019-09-02 08:34:44 +02003761 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
3762 rc = -ENXIO;
3763 break;
3764 }
3765 rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
3766 break;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003767 case KVM_MP_STATE_CHECK_STOP:
Joe Perches3b684a42020-03-10 21:51:32 -07003768		fallthrough;	/* CHECK_STOP is not supported yet */
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003769 default:
3770 rc = -ENXIO;
3771 }
3772
Christoffer Dalle83dff52017-12-04 21:35:31 +01003773 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003774 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003775}
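/*
 * Illustrative userspace sketch (not part of this file): the first
 * KVM_SET_MP_STATE call sets user_cpu_state_ctrl, after which user space
 * owns all start/stop transitions. vcpu_fd is assumed to be an open KVM
 * vcpu fd.
 *
 *	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };
 *
 *	if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp))
 *		err(1, "KVM_SET_MP_STATE");
 */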
3776
David Hildenbrand8ad35752014-03-14 11:00:21 +01003777static bool ibs_enabled(struct kvm_vcpu *vcpu)
3778{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003779 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003780}
3781
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003782static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3783{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003784retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003785 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003786 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003787 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003788 /*
3789 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003790 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003791 * This ensures that the ipte instruction for this request has
3792 * already finished. We might race against a second unmapper that
 3793	 * wants to set the blocking bit. Let's just retry the request loop.
3794 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003795 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003796 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003797 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3798 kvm_s390_get_prefix(vcpu),
3799 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003800 if (rc) {
3801 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003802 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003803 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003804 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003805 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003806
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003807 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3808 vcpu->arch.sie_block->ihcpu = 0xffff;
3809 goto retry;
3810 }
3811
David Hildenbrand8ad35752014-03-14 11:00:21 +01003812 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3813 if (!ibs_enabled(vcpu)) {
3814 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003815 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003816 }
3817 goto retry;
3818 }
3819
3820 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3821 if (ibs_enabled(vcpu)) {
3822 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003823 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003824 }
3825 goto retry;
3826 }
3827
David Hildenbrand6502a342016-06-21 14:19:51 +02003828 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3829 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3830 goto retry;
3831 }
3832
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003833 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3834 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003835 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003836 * instruction manually, in order to provide additional
 3837	 * functionality needed for live migration.
3838 */
3839 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3840 goto retry;
3841 }
3842
3843 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3844 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003845 * Re-enable CMM virtualization if CMMA is available and
3846 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003847 */
3848 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003849 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003850 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3851 goto retry;
3852 }
3853
David Hildenbrand0759d062014-05-13 16:54:32 +02003854 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003855 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003856 /* we left the vsie handler, nothing to do, just clear the request */
3857 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003858
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003859 return 0;
3860}
3861
David Hildenbrand0e7def52018-02-07 12:46:43 +01003862void kvm_s390_set_tod_clock(struct kvm *kvm,
3863 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003864{
3865 struct kvm_vcpu *vcpu;
3866 struct kvm_s390_tod_clock_ext htod;
3867 int i;
3868
3869 mutex_lock(&kvm->lock);
3870 preempt_disable();
3871
3872 get_tod_clock_ext((char *)&htod);
3873
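	/*
	 * The guest epoch is the wrapping 64-bit difference between the
	 * requested guest TOD and the current host TOD. With the multiple
	 * epoch facility (139) the 8-bit epoch index extends this value;
	 * if the subtraction below wraps (kvm->arch.epoch > gtod->tod), a
	 * borrow has to be propagated into the epoch index difference.
	 */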
3874 kvm->arch.epoch = gtod->tod - htod.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003875 kvm->arch.epdx = 0;
3876 if (test_kvm_facility(kvm, 139)) {
3877 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3878 if (kvm->arch.epoch > gtod->tod)
3879 kvm->arch.epdx -= 1;
3880 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003881
3882 kvm_s390_vcpu_block_all(kvm);
3883 kvm_for_each_vcpu(i, vcpu, kvm) {
3884 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3885 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3886 }
3887
3888 kvm_s390_vcpu_unblock_all(kvm);
3889 preempt_enable();
3890 mutex_unlock(&kvm->lock);
3891}
3892
Thomas Huthfa576c52014-05-06 17:20:16 +02003893/**
3894 * kvm_arch_fault_in_page - fault-in guest page if necessary
3895 * @vcpu: The corresponding virtual cpu
3896 * @gpa: Guest physical address
3897 * @writable: Whether the page should be writable or not
3898 *
3899 * Make sure that a guest page has been faulted-in on the host.
3900 *
3901 * Return: Zero on success, negative error code otherwise.
3902 */
3903long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003904{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003905 return gmap_fault(vcpu->arch.gmap, gpa,
3906 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003907}
3908
Dominik Dingel3c038e62013-10-07 17:11:48 +02003909static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3910 unsigned long token)
3911{
3912 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003913 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003914
3915 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003916 irq.u.ext.ext_params2 = token;
3917 irq.type = KVM_S390_INT_PFAULT_INIT;
3918 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003919 } else {
3920 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003921 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003922 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3923 }
3924}
3925
Vitaly Kuznetsov2a18b7e2020-06-10 19:55:32 +02003926bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
Dominik Dingel3c038e62013-10-07 17:11:48 +02003927 struct kvm_async_pf *work)
3928{
3929 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3930 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
Vitaly Kuznetsov2a18b7e2020-06-10 19:55:32 +02003931
3932 return true;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003933}
3934
3935void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3936 struct kvm_async_pf *work)
3937{
3938 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3939 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3940}
3941
3942void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3943 struct kvm_async_pf *work)
3944{
3945 /* s390 will always inject the page directly */
3946}
3947
Vitaly Kuznetsov7c0ade62020-05-25 16:41:18 +02003948bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
Dominik Dingel3c038e62013-10-07 17:11:48 +02003949{
3950 /*
3951 * s390 will always inject the page directly,
 3952	 * but we still want kvm_check_async_pf_completion() to clean up
3953 */
3954 return true;
3955}
3956
3957static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3958{
3959 hva_t hva;
3960 struct kvm_arch_async_pf arch;
3961 int rc;
3962
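	/*
	 * Only arm an async pfault if the guest has set up the pfault
	 * facility (valid token), the current PSW matches the pfault
	 * select/compare pair, external interrupts and the service signal
	 * subclass are enabled, no interrupt is already pending, and the
	 * gmap has pfault handling enabled.
	 */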
3963 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3964 return 0;
3965 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3966 vcpu->arch.pfault_compare)
3967 return 0;
3968 if (psw_extint_disabled(vcpu))
3969 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02003970 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003971 return 0;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02003972 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003973 return 0;
3974 if (!vcpu->arch.gmap->pfault_enabled)
3975 return 0;
3976
Heiko Carstens81480cc2014-01-01 16:36:07 +01003977 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3978 hva += current->thread.gmap_addr & ~PAGE_MASK;
3979 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003980 return 0;
3981
3982 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3983 return rc;
3984}
3985
Thomas Huth3fb4c402013-09-12 10:33:43 +02003986static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003987{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003988 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003989
Dominik Dingel3c038e62013-10-07 17:11:48 +02003990 /*
 3991	 * On s390, notifications for arriving pages are delivered directly
 3992	 * to the guest, but the housekeeping for completed pfaults is
 3993	 * handled outside the worker.
3994 */
3995 kvm_check_async_pf_completion(vcpu);
3996
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003997 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3998 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003999
4000 if (need_resched())
4001 schedule();
4002
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02004003 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02004004 s390_handle_mcck();
4005
Jens Freimann79395032014-04-17 10:10:30 +02004006 if (!kvm_is_ucontrol(vcpu->kvm)) {
4007 rc = kvm_s390_deliver_pending_interrupts(vcpu);
4008 if (rc)
4009 return rc;
4010 }
Carsten Otte0ff31862008-05-21 13:37:37 +02004011
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02004012 rc = kvm_s390_handle_requests(vcpu);
4013 if (rc)
4014 return rc;
4015
David Hildenbrand27291e22014-01-23 12:26:52 +01004016 if (guestdbg_enabled(vcpu)) {
4017 kvm_s390_backup_guest_per_regs(vcpu);
4018 kvm_s390_patch_guest_per_regs(vcpu);
4019 }
4020
Michael Mueller9f30f622019-01-31 09:52:44 +01004021 clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
4022
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004023 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004024 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4025 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4026 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02004027
Thomas Huth3fb4c402013-09-12 10:33:43 +02004028 return 0;
4029}
4030
Thomas Huth492d8642015-02-10 16:11:01 +01004031static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
4032{
David Hildenbrand56317922016-01-12 17:37:58 +01004033 struct kvm_s390_pgm_info pgm_info = {
4034 .code = PGM_ADDRESSING,
4035 };
4036 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01004037 int rc;
4038
4039 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4040 trace_kvm_s390_sie_fault(vcpu);
4041
4042 /*
4043 * We want to inject an addressing exception, which is defined as a
4044 * suppressing or terminating exception. However, since we came here
4045 * by a DAT access exception, the PSW still points to the faulting
4046 * instruction since DAT exceptions are nullifying. So we've got
4047 * to look up the current opcode to get the length of the instruction
4048 * to be able to forward the PSW.
4049 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02004050 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01004051 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01004052 if (rc < 0) {
4053 return rc;
4054 } else if (rc) {
4055 /* Instruction-Fetching Exceptions - we can't detect the ilen.
4056 * Forward by arbitrary ilc, injection will take care of
4057 * nullification if necessary.
4058 */
4059 pgm_info = vcpu->arch.pgm;
4060 ilen = 4;
4061 }
David Hildenbrand56317922016-01-12 17:37:58 +01004062 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4063 kvm_s390_forward_psw(vcpu, ilen);
4064 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01004065}
4066
Thomas Huth3fb4c402013-09-12 10:33:43 +02004067static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4068{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02004069 struct mcck_volatile_info *mcck_info;
4070 struct sie_page *sie_page;
4071
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02004072 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4073 vcpu->arch.sie_block->icptcode);
4074 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4075
David Hildenbrand27291e22014-01-23 12:26:52 +01004076 if (guestdbg_enabled(vcpu))
4077 kvm_s390_restore_guest_per_regs(vcpu);
4078
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01004079 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4080 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004081
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02004082 if (exit_reason == -EINTR) {
4083 VCPU_EVENT(vcpu, 3, "%s", "machine check");
4084 sie_page = container_of(vcpu->arch.sie_block,
4085 struct sie_page, sie_block);
4086 mcck_info = &sie_page->mcck_info;
4087 kvm_s390_reinject_machine_check(vcpu, mcck_info);
4088 return 0;
4089 }
4090
David Hildenbrand71f116b2015-10-19 16:24:28 +02004091 if (vcpu->arch.sie_block->icptcode > 0) {
4092 int rc = kvm_handle_sie_intercept(vcpu);
4093
4094 if (rc != -EOPNOTSUPP)
4095 return rc;
4096 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4097 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4098 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4099 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4100 return -EREMOTE;
4101 } else if (exit_reason != -EFAULT) {
4102 vcpu->stat.exit_null++;
4103 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02004104 } else if (kvm_is_ucontrol(vcpu->kvm)) {
4105 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4106 vcpu->run->s390_ucontrol.trans_exc_code =
4107 current->thread.gmap_addr;
4108 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004109 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004110 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02004111 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004112 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004113 if (kvm_arch_setup_async_pf(vcpu))
4114 return 0;
4115 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004116 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02004117 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004118}
4119
Janosch Frank3adae0b2019-12-13 08:26:06 -05004120#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
Thomas Huth3fb4c402013-09-12 10:33:43 +02004121static int __vcpu_run(struct kvm_vcpu *vcpu)
4122{
4123 int rc, exit_reason;
Janosch Frankc8aac232019-05-08 15:52:00 +02004124 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004125
Thomas Huth800c1062013-09-12 10:33:45 +02004126 /*
 4127	 * We try to hold kvm->srcu during most of vcpu_run (except when running
 4128	 * the guest), so that memslots (and other stuff) are protected
4129 */
4130 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4131
Thomas Hutha76ccff2013-09-12 10:33:44 +02004132 do {
4133 rc = vcpu_pre_run(vcpu);
4134 if (rc)
4135 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004136
Thomas Huth800c1062013-09-12 10:33:45 +02004137 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02004138 /*
 4139		 * As PF_VCPU will be used in the fault handler, there must be
 4140		 * no uaccess between guest_enter and guest_exit.
4141 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02004142 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02004143 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01004144 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02004145 local_irq_enable();
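		/*
		 * For protected guests, SIE works on the register copy in
		 * the SIE page: shadow the GPRs into it before entry and
		 * copy them back out after exit (see below).
		 */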
Janosch Frankc8aac232019-05-08 15:52:00 +02004146 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4147 memcpy(sie_page->pv_grregs,
4148 vcpu->run->s.regs.gprs,
4149 sizeof(sie_page->pv_grregs));
4150 }
Thomas Hutha76ccff2013-09-12 10:33:44 +02004151 exit_reason = sie64a(vcpu->arch.sie_block,
4152 vcpu->run->s.regs.gprs);
Janosch Frankc8aac232019-05-08 15:52:00 +02004153 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4154 memcpy(vcpu->run->s.regs.gprs,
4155 sie_page->pv_grregs,
4156 sizeof(sie_page->pv_grregs));
Janosch Frank3adae0b2019-12-13 08:26:06 -05004157 /*
4158 * We're not allowed to inject interrupts on intercepts
4159 * that leave the guest state in an "in-between" state
4160 * where the next SIE entry will do a continuation.
4161 * Fence interrupts in our "internal" PSW.
4162 */
4163 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4164 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4165 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4166 }
Janosch Frankc8aac232019-05-08 15:52:00 +02004167 }
Christian Borntraeger0097d122015-04-30 13:43:30 +02004168 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01004169 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02004170 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02004171 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02004172 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004173
Thomas Hutha76ccff2013-09-12 10:33:44 +02004174 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01004175 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004176
Thomas Huth800c1062013-09-12 10:33:45 +02004177 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01004178 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004179}
4180
Janosch Frank811ea792019-06-14 13:11:21 +02004181static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
David Hildenbrandb028ee32014-07-17 10:47:43 +02004182{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004183 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004184 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004185
4186 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004187 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02004188 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4189 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
David Hildenbrandb028ee32014-07-17 10:47:43 +02004190 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrandb028ee32014-07-17 10:47:43 +02004191 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4192 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4193 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4194 }
4195 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4196 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4197 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4198 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02004199 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4200 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004201 }
Fan Zhang80cd8762016-08-15 04:53:22 +02004202 /*
4203 * If userspace sets the riccb (e.g. after migration) to a valid state,
4204 * we should enable RI here instead of doing the lazy enablement.
4205 */
4206 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004207 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02004208 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01004209 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004210 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01004211 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02004212 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004213 /*
4214 * If userspace sets the gscb (e.g. after migration) to non-zero,
4215 * we should enable GS here instead of doing the lazy enablement.
4216 */
4217 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
4218 test_kvm_facility(vcpu->kvm, 133) &&
4219 gscb->gssm &&
4220 !vcpu->arch.gs_enabled) {
4221 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
4222 vcpu->arch.sie_block->ecb |= ECB_GS;
4223 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4224 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02004225 }
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01004226 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
4227 test_kvm_facility(vcpu->kvm, 82)) {
4228 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4229 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4230 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004231 if (MACHINE_HAS_GS) {
4232 preempt_disable();
4233 __ctl_set_bit(2, 4);
4234 if (current->thread.gs_cb) {
4235 vcpu->arch.host_gscb = current->thread.gs_cb;
4236 save_gs_cb(vcpu->arch.host_gscb);
4237 }
4238 if (vcpu->arch.gs_enabled) {
4239 current->thread.gs_cb = (struct gs_cb *)
4240 &vcpu->run->s.regs.gscb;
4241 restore_gs_cb(current->thread.gs_cb);
4242 }
4243 preempt_enable();
4244 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004245 /* SIE will load etoken directly from SDNX and therefore kvm_run */
Janosch Frank811ea792019-06-14 13:11:21 +02004246}
4247
4248static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4249{
4250 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4251 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4252 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4253 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4254 /* some control register changes require a tlb flush */
4255 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4256 }
4257 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4258 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4259 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4260 }
4261 save_access_regs(vcpu->arch.host_acrs);
4262 restore_access_regs(vcpu->run->s.regs.acrs);
4263 /* save host (userspace) fprs/vrs */
4264 save_fpu_regs();
4265 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4266 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4267 if (MACHINE_HAS_VX)
4268 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
4269 else
4270 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
4271 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
4272 if (test_fp_ctl(current->thread.fpu.fpc))
4273 /* User space provided an invalid FPC, let's clear it */
4274 current->thread.fpu.fpc = 0;
4275
4276 /* Sync fmt2 only data */
4277 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
4278 sync_regs_fmt2(vcpu, kvm_run);
4279 } else {
4280 /*
4281 * In several places we have to modify our internal view to
4282 * not do things that are disallowed by the ultravisor. For
4283 * example we must not inject interrupts after specific exits
4284 * (e.g. 112 prefix page not secure). We do this by turning
4285 * off the machine check, external and I/O interrupt bits
4286 * of our PSW copy. To avoid getting validity intercepts, we
4287 * do only accept the condition code from userspace.
4288 */
4289 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4290 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4291 PSW_MASK_CC;
4292 }
Fan Zhang80cd8762016-08-15 04:53:22 +02004293
David Hildenbrandb028ee32014-07-17 10:47:43 +02004294 kvm_run->kvm_dirty_regs = 0;
4295}
4296
Janosch Frank811ea792019-06-14 13:11:21 +02004297static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
David Hildenbrandb028ee32014-07-17 10:47:43 +02004298{
David Hildenbrandb028ee32014-07-17 10:47:43 +02004299 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4300 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4301 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01004302 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004303 if (MACHINE_HAS_GS) {
4304 __ctl_set_bit(2, 4);
4305 if (vcpu->arch.gs_enabled)
4306 save_gs_cb(current->thread.gs_cb);
4307 preempt_disable();
4308 current->thread.gs_cb = vcpu->arch.host_gscb;
4309 restore_gs_cb(vcpu->arch.host_gscb);
4310 preempt_enable();
4311 if (!vcpu->arch.host_gscb)
4312 __ctl_clear_bit(2, 4);
4313 vcpu->arch.host_gscb = NULL;
4314 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004315 /* SIE will save etoken directly into SDNX and therefore kvm_run */
David Hildenbrandb028ee32014-07-17 10:47:43 +02004316}
4317
Janosch Frank811ea792019-06-14 13:11:21 +02004318static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4319{
4320 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4321 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4322 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
4323 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4324 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
4325 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4326 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4327 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4328 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
4329 save_access_regs(vcpu->run->s.regs.acrs);
4330 restore_access_regs(vcpu->arch.host_acrs);
4331 /* Save guest register state */
4332 save_fpu_regs();
4333 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4334 /* Restore will be done lazily at return */
4335 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
4336 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
4337 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
4338 store_regs_fmt2(vcpu, kvm_run);
4339}
4340
Tianjia Zhang1b94f6f2020-04-16 13:10:57 +08004341int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004342{
Tianjia Zhang1b94f6f2020-04-16 13:10:57 +08004343 struct kvm_run *kvm_run = vcpu->run;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004344 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004345
Paolo Bonzini460df4c2017-02-08 11:50:15 +01004346 if (kvm_run->immediate_exit)
4347 return -EINTR;
4348
Thomas Huth200824f2019-09-04 10:51:59 +02004349 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4350 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4351 return -EINVAL;
4352
Christoffer Dallaccb7572017-12-04 21:35:25 +01004353 vcpu_load(vcpu);
4354
David Hildenbrand27291e22014-01-23 12:26:52 +01004355 if (guestdbg_exit_pending(vcpu)) {
4356 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004357 rc = 0;
4358 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01004359 }
4360
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004361 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004362
Janosch Frankfe28c7862019-05-15 13:24:30 +02004363 /*
 4364	 * no need to check the return value of vcpu_start as it can only fail
 4365	 * for protvirt, and protvirt implies user-controlled cpu state
4366 */
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004367 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4368 kvm_s390_vcpu_start(vcpu);
4369 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004370 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004371 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004372 rc = -EINVAL;
4373 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004374 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004375
David Hildenbrandb028ee32014-07-17 10:47:43 +02004376 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01004377 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004378
Heiko Carstensdab4079d2009-06-12 10:26:32 +02004379 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004380 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02004381
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004382 if (signal_pending(current) && !rc) {
4383 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004384 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004385 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004386
David Hildenbrand27291e22014-01-23 12:26:52 +01004387 if (guestdbg_exit_pending(vcpu) && !rc) {
4388 kvm_s390_prepare_debug_exit(vcpu);
4389 rc = 0;
4390 }
4391
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004392 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02004393 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004394 rc = 0;
4395 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004396
David Hildenbranddb0758b2016-02-15 09:42:25 +01004397 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004398 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004399
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004400 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004401
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004402 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01004403out:
4404 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02004405 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004406}
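/*
 * Illustrative userspace sketch (not part of this file): the canonical run
 * loop around the ioctl above. run is assumed to point to the vcpu's
 * mmap'ed kvm_run area (sized via KVM_GET_VCPU_MMAP_SIZE) and vcpu_fd to be
 * an open KVM vcpu fd; handle_sieic() is a hypothetical helper for the
 * intercepts that are handed to user space (rc == -EREMOTE above).
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
 *			err(1, "KVM_RUN");
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run);
 *	}
 */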
4407
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004408/*
4409 * store status at address
 4410 * we have two special cases:
4411 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4412 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4413 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01004414int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004415{
Carsten Otte092670c2011-07-24 10:48:22 +02004416 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004417 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02004418 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01004419 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004420 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004421
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004422 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01004423 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4424 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004425 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004426 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004427 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4428 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004429 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004430 gpa = px;
4431 } else
4432 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004433
4434 /* manually convert vector registers if necessary */
4435 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01004436 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004437 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4438 fprs, 128);
4439 } else {
4440 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01004441 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004442 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004443 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004444 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004445 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004446 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004447 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02004448 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004449 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004450 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004451 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004452 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01004453 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004454 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01004455 &cputm, 8);
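	/*
	 * The save area holds only bits 0-55 of the clock comparator,
	 * preceded by a zero byte, hence the shift by 8.
	 */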
Thomas Huth178bd782013-11-13 20:28:18 +01004456 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004457 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004458 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004459 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004460 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004461 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004462 &vcpu->arch.sie_block->gcr, 128);
4463 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004464}
4465
Thomas Huthe8798922013-11-06 15:46:33 +01004466int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4467{
4468 /*
4469 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01004470 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01004471	 * them into the save area
4472 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02004473 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004474 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01004475 save_access_regs(vcpu->run->s.regs.acrs);
4476
4477 return kvm_s390_store_status_unloaded(vcpu, addr);
4478}
4479
David Hildenbrand8ad35752014-03-14 11:00:21 +01004480static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4481{
4482 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004483 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004484}
4485
4486static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
4487{
4488 unsigned int i;
4489 struct kvm_vcpu *vcpu;
4490
4491 kvm_for_each_vcpu(i, vcpu, kvm) {
4492 __disable_ibs_on_vcpu(vcpu);
4493 }
4494}
4495
4496static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4497{
David Hildenbrand09a400e2016-04-04 15:57:08 +02004498 if (!sclp.has_ibs)
4499 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004500 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004501 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004502}
4503
Janosch Frankfe28c7862019-05-15 13:24:30 +02004504int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004505{
Janosch Frankfe28c7862019-05-15 13:24:30 +02004506 int i, online_vcpus, r = 0, started_vcpus = 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004507
4508 if (!is_vcpu_stopped(vcpu))
Janosch Frankfe28c7862019-05-15 13:24:30 +02004509 return 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004510
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004511 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004512 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004513 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004514 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4515
Janosch Frankfe28c7862019-05-15 13:24:30 +02004516 /* Let's tell the UV that we want to change into the operating state */
4517 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4518 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
4519 if (r) {
4520 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4521 return r;
4522 }
4523 }
4524
David Hildenbrand8ad35752014-03-14 11:00:21 +01004525 for (i = 0; i < online_vcpus; i++) {
4526 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
4527 started_vcpus++;
4528 }
4529
4530 if (started_vcpus == 0) {
4531 /* we're the only active VCPU -> speed it up */
4532 __enable_ibs_on_vcpu(vcpu);
4533 } else if (started_vcpus == 1) {
4534 /*
4535 * As we are starting a second VCPU, we have to disable
4536 * the IBS facility on all VCPUs to remove potentially
 4537		 * outstanding ENABLE requests.
4538 */
4539 __disable_ibs_on_all_vcpus(vcpu->kvm);
4540 }
4541
David Hildenbrand9daecfc2018-01-23 18:05:30 +01004542 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004543 /*
Christian Borntraeger72f21822020-01-30 11:18:28 -05004544 * The real PSW might have changed due to a RESTART interpreted by the
4545 * ultravisor. We block all interrupts and let the next sie exit
4546 * refresh our view.
4547 */
4548 if (kvm_s390_pv_cpu_is_protected(vcpu))
4549 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4550 /*
David Hildenbrand8ad35752014-03-14 11:00:21 +01004551 * Another VCPU might have used IBS while we were offline.
4552 * Let's play safe and flush the VCPU at startup.
4553 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02004554 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004555 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
Janosch Frankfe28c7862019-05-15 13:24:30 +02004556 return 0;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004557}
4558
Janosch Frankfe28c7862019-05-15 13:24:30 +02004559int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004560{
Janosch Frankfe28c7862019-05-15 13:24:30 +02004561 int i, online_vcpus, r = 0, started_vcpus = 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004562 struct kvm_vcpu *started_vcpu = NULL;
4563
4564 if (is_vcpu_stopped(vcpu))
Janosch Frankfe28c7862019-05-15 13:24:30 +02004565 return 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004566
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004567 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004568 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004569 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004570 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4571
Janosch Frankfe28c7862019-05-15 13:24:30 +02004572 /* Let's tell the UV that we want to change into the stopped state */
4573 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4574 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
4575 if (r) {
4576 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4577 return r;
4578 }
4579 }
4580
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004581	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02004582 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004583
David Hildenbrandef8f4f42018-01-23 18:05:29 +01004584 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004585 __disable_ibs_on_vcpu(vcpu);
4586
4587 for (i = 0; i < online_vcpus; i++) {
4588 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
4589 started_vcpus++;
4590 started_vcpu = vcpu->kvm->vcpus[i];
4591 }
4592 }
4593
4594 if (started_vcpus == 1) {
4595 /*
4596 * As we only have one VCPU left, we want to enable the
4597 * IBS facility for that VCPU to speed it up.
4598 */
4599 __enable_ibs_on_vcpu(started_vcpu);
4600 }
4601
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004602 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
Janosch Frankfe28c7862019-05-15 13:24:30 +02004603 return 0;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004604}
4605
Cornelia Huckd6712df2012-12-20 15:32:11 +01004606static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4607 struct kvm_enable_cap *cap)
4608{
4609 int r;
4610
4611 if (cap->flags)
4612 return -EINVAL;
4613
4614 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004615 case KVM_CAP_S390_CSS_SUPPORT:
4616 if (!vcpu->kvm->arch.css_support) {
4617 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02004618 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004619 trace_kvm_s390_enable_css(vcpu->kvm);
4620 }
4621 r = 0;
4622 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01004623 default:
4624 r = -EINVAL;
4625 break;
4626 }
4627 return r;
4628}
4629
Janosch Frank19e12272019-04-02 09:21:06 +02004630static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
4631 struct kvm_s390_mem_op *mop)
4632{
4633 void __user *uaddr = (void __user *)mop->buf;
4634 int r = 0;
4635
4636 if (mop->flags || !mop->size)
4637 return -EINVAL;
4638 if (mop->size + mop->sida_offset < mop->size)
4639 return -EINVAL;
4640 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
4641 return -E2BIG;
4642
4643 switch (mop->op) {
4644 case KVM_S390_MEMOP_SIDA_READ:
4645 if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
4646 mop->sida_offset), mop->size))
4647 r = -EFAULT;
4648
4649 break;
4650 case KVM_S390_MEMOP_SIDA_WRITE:
4651 if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
4652 mop->sida_offset), uaddr, mop->size))
4653 r = -EFAULT;
4654 break;
4655 }
4656 return r;
4657}
Thomas Huth41408c282015-02-06 15:01:21 +01004658static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
4659 struct kvm_s390_mem_op *mop)
4660{
4661 void __user *uaddr = (void __user *)mop->buf;
4662 void *tmpbuf = NULL;
Janosch Frank19e12272019-04-02 09:21:06 +02004663 int r = 0;
Thomas Huth41408c282015-02-06 15:01:21 +01004664 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
4665 | KVM_S390_MEMOP_F_CHECK_ONLY;
4666
Thomas Hutha13b03b2019-08-29 14:25:17 +02004667 if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
Thomas Huth41408c282015-02-06 15:01:21 +01004668 return -EINVAL;
4669
4670 if (mop->size > MEM_OP_MAX_SIZE)
4671 return -E2BIG;
4672
Janosch Frank19e12272019-04-02 09:21:06 +02004673 if (kvm_s390_pv_cpu_is_protected(vcpu))
4674 return -EINVAL;
4675
Thomas Huth41408c282015-02-06 15:01:21 +01004676 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
4677 tmpbuf = vmalloc(mop->size);
4678 if (!tmpbuf)
4679 return -ENOMEM;
4680 }
4681
Thomas Huth41408c282015-02-06 15:01:21 +01004682 switch (mop->op) {
4683 case KVM_S390_MEMOP_LOGICAL_READ:
4684 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004685 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4686 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01004687 break;
4688 }
4689 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4690 if (r == 0) {
4691 if (copy_to_user(uaddr, tmpbuf, mop->size))
4692 r = -EFAULT;
4693 }
4694 break;
4695 case KVM_S390_MEMOP_LOGICAL_WRITE:
4696 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004697 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4698 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01004699 break;
4700 }
4701 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
4702 r = -EFAULT;
4703 break;
4704 }
4705 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4706 break;
Thomas Huth41408c282015-02-06 15:01:21 +01004707 }
4708
Thomas Huth41408c282015-02-06 15:01:21 +01004709 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
4710 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
4711
4712 vfree(tmpbuf);
4713 return r;
4714}
4715
Janosch Frank19e12272019-04-02 09:21:06 +02004716static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu,
4717 struct kvm_s390_mem_op *mop)
4718{
4719 int r, srcu_idx;
4720
4721 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4722
4723 switch (mop->op) {
4724 case KVM_S390_MEMOP_LOGICAL_READ:
4725 case KVM_S390_MEMOP_LOGICAL_WRITE:
4726 r = kvm_s390_guest_mem_op(vcpu, mop);
4727 break;
4728 case KVM_S390_MEMOP_SIDA_READ:
4729 case KVM_S390_MEMOP_SIDA_WRITE:
4730 /* we are locked against sida going away by the vcpu->mutex */
4731 r = kvm_s390_guest_sida_op(vcpu, mop);
4732 break;
4733 default:
4734 r = -EINVAL;
4735 }
4736
4737 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
4738 return r;
4739}
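/*
 * Illustrative userspace sketch (not part of this file): reading guest
 * memory through the vcpu's logical address space with the ioctl backed by
 * the dispatcher above. vcpu_fd is assumed to be an open KVM vcpu fd and
 * buf a userspace buffer of at least mop.size bytes.
 *
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr	= 0x1000,
 *		.size	= 512,
 *		.op	= KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf	= (__u64)(unsigned long)buf,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop))
 *		err(1, "KVM_S390_MEM_OP");
 */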
4740
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004741long kvm_arch_vcpu_async_ioctl(struct file *filp,
4742 unsigned int ioctl, unsigned long arg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004743{
4744 struct kvm_vcpu *vcpu = filp->private_data;
4745 void __user *argp = (void __user *)arg;
4746
Avi Kivity93736622010-05-13 12:35:17 +03004747 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01004748 case KVM_S390_IRQ: {
4749 struct kvm_s390_irq s390irq;
4750
Jens Freimann47b43c52014-11-11 20:57:06 +01004751 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004752 return -EFAULT;
4753 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Jens Freimann47b43c52014-11-11 20:57:06 +01004754 }
Avi Kivity93736622010-05-13 12:35:17 +03004755 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01004756 struct kvm_s390_interrupt s390int;
Thomas Huth53936b52019-09-12 13:54:38 +02004757 struct kvm_s390_irq s390irq = {};
Carsten Otteba5c1e92008-03-25 18:47:26 +01004758
4759 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004760 return -EFAULT;
Jens Freimann383d0b02014-07-29 15:11:49 +02004761 if (s390int_to_s390irq(&s390int, &s390irq))
4762 return -EINVAL;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004763 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Carsten Otteba5c1e92008-03-25 18:47:26 +01004764 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004765 }
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004766 return -ENOIOCTLCMD;
4767}
4768
4769long kvm_arch_vcpu_ioctl(struct file *filp,
4770 unsigned int ioctl, unsigned long arg)
4771{
4772 struct kvm_vcpu *vcpu = filp->private_data;
4773 void __user *argp = (void __user *)arg;
4774 int idx;
4775 long r;
Janosch Frank8a8378f2020-01-09 04:37:50 -05004776 u16 rc, rrc;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004777
4778 vcpu_load(vcpu);
4779
4780 switch (ioctl) {
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004781 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02004782 idx = srcu_read_lock(&vcpu->kvm->srcu);
Christian Borntraeger55680892020-01-31 05:02:00 -05004783 r = kvm_s390_store_status_unloaded(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02004784 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004785 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004786 case KVM_S390_SET_INITIAL_PSW: {
4787 psw_t psw;
4788
Avi Kivitybc923cc2010-05-13 12:21:46 +03004789 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004790 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03004791 break;
4792 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
4793 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004794 }
Janosch Frank7de3f142020-01-31 05:02:02 -05004795 case KVM_S390_CLEAR_RESET:
4796 r = 0;
4797 kvm_arch_vcpu_ioctl_clear_reset(vcpu);
Janosch Frank8a8378f2020-01-09 04:37:50 -05004798 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4799 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
4800 UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
4801 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
4802 rc, rrc);
4803 }
Janosch Frank7de3f142020-01-31 05:02:02 -05004804 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004805 case KVM_S390_INITIAL_RESET:
Janosch Frank7de3f142020-01-31 05:02:02 -05004806 r = 0;
4807 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
Janosch Frank8a8378f2020-01-09 04:37:50 -05004808 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4809 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
4810 UVC_CMD_CPU_RESET_INITIAL,
4811 &rc, &rrc);
4812 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
4813 rc, rrc);
4814 }
Janosch Frank7de3f142020-01-31 05:02:02 -05004815 break;
4816 case KVM_S390_NORMAL_RESET:
4817 r = 0;
4818 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
Janosch Frank8a8378f2020-01-09 04:37:50 -05004819 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4820 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
4821 UVC_CMD_CPU_RESET, &rc, &rrc);
4822 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
4823 rc, rrc);
4824 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03004825 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02004826 case KVM_SET_ONE_REG:
4827 case KVM_GET_ONE_REG: {
4828 struct kvm_one_reg reg;
Janosch Frank68cf7b12019-06-14 13:11:21 +02004829 r = -EINVAL;
4830 if (kvm_s390_pv_cpu_is_protected(vcpu))
4831 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02004832 r = -EFAULT;
4833 if (copy_from_user(&reg, argp, sizeof(reg)))
4834 break;
4835 if (ioctl == KVM_SET_ONE_REG)
4836 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
4837 else
4838 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
4839 break;
4840 }
Carsten Otte27e03932012-01-04 10:25:21 +01004841#ifdef CONFIG_KVM_S390_UCONTROL
4842 case KVM_S390_UCAS_MAP: {
4843 struct kvm_s390_ucas_mapping ucasmap;
4844
4845 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4846 r = -EFAULT;
4847 break;
4848 }
4849
4850 if (!kvm_is_ucontrol(vcpu->kvm)) {
4851 r = -EINVAL;
4852 break;
4853 }
4854
4855 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
4856 ucasmap.vcpu_addr, ucasmap.length);
4857 break;
4858 }
4859 case KVM_S390_UCAS_UNMAP: {
4860 struct kvm_s390_ucas_mapping ucasmap;
4861
4862 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4863 r = -EFAULT;
4864 break;
4865 }
4866
4867 if (!kvm_is_ucontrol(vcpu->kvm)) {
4868 r = -EINVAL;
4869 break;
4870 }
4871
4872 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
4873 ucasmap.length);
4874 break;
4875 }
4876#endif
Carsten Otteccc79102012-01-04 10:25:26 +01004877 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02004878 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01004879 break;
4880 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01004881 case KVM_ENABLE_CAP:
4882 {
4883 struct kvm_enable_cap cap;
4884 r = -EFAULT;
4885 if (copy_from_user(&cap, argp, sizeof(cap)))
4886 break;
4887 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
4888 break;
4889 }
Thomas Huth41408c282015-02-06 15:01:21 +01004890 case KVM_S390_MEM_OP: {
4891 struct kvm_s390_mem_op mem_op;
4892
4893 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
Janosch Frank19e12272019-04-02 09:21:06 +02004894 r = kvm_s390_guest_memsida_op(vcpu, &mem_op);
Thomas Huth41408c282015-02-06 15:01:21 +01004895 else
4896 r = -EFAULT;
4897 break;
4898 }
Jens Freimann816c7662014-11-24 17:13:46 +01004899 case KVM_S390_SET_IRQ_STATE: {
4900 struct kvm_s390_irq_state irq_state;
4901
4902 r = -EFAULT;
4903 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4904 break;
4905 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
4906 irq_state.len == 0 ||
4907 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
4908 r = -EINVAL;
4909 break;
4910 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004911 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004912 r = kvm_s390_set_irq_state(vcpu,
4913 (void __user *) irq_state.buf,
4914 irq_state.len);
4915 break;
4916 }
4917 case KVM_S390_GET_IRQ_STATE: {
4918 struct kvm_s390_irq_state irq_state;
4919
4920 r = -EFAULT;
4921 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4922 break;
4923 if (irq_state.len == 0) {
4924 r = -EINVAL;
4925 break;
4926 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004927	/* do not use irq_state.flags; it would break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004928 r = kvm_s390_get_irq_state(vcpu,
4929 (__u8 __user *) irq_state.buf,
4930 irq_state.len);
4931 break;
4932 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004933 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01004934 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004935 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004936
4937 vcpu_put(vcpu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004938 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004939}
4940
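/*
 * Illustrative sketch (editor's addition, not part of this file): how
 * userspace might drive the KVM_S390_MEM_OP handler above. Field values
 * are assumptions for the example; the uapi layout of
 * struct kvm_s390_mem_op is authoritative.
 *
 *	__u8 buf[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x1000,                      // guest logical address
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(uintptr_t)buf,
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0) // vcpu_fd assumed open
 *		err(1, "KVM_S390_MEM_OP");
 */
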
Souptick Joarder1499fa82018-04-19 00:49:58 +05304941vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
Carsten Otte5b1c1492012-01-04 10:25:23 +01004942{
4943#ifdef CONFIG_KVM_S390_UCONTROL
4944 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
4945 && (kvm_is_ucontrol(vcpu->kvm))) {
4946 vmf->page = virt_to_page(vcpu->arch.sie_block);
4947 get_page(vmf->page);
4948 return 0;
4949 }
4950#endif
4951 return VM_FAULT_SIGBUS;
4952}
4953
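/*
 * Illustrative sketch (editor's addition; assumes a ucontrol VM with an
 * open vcpu_fd): the fault handler above backs a userspace mapping of
 * the vcpu fd with the SIE control block page:
 *
 *	void *sie = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, vcpu_fd,
 *			 KVM_S390_SIE_PAGE_OFFSET * getpagesize());
 */
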
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004954/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004955int kvm_arch_prepare_memory_region(struct kvm *kvm,
4956 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004957 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09004958 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004959{
Nick Wangdd2887e2013-03-25 17:22:57 +01004960	/* A few sanity checks. Memory slots must start and end on a segment
4961	   boundary (1MB). The userland memory backing a slot may be
4962	   fragmented into various different vmas, and it is fine to mmap()
4963	   and munmap() within this slot at any time after this call. */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004964
Carsten Otte598841c2011-07-24 10:48:21 +02004965 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004966 return -EINVAL;
4967
Carsten Otte598841c2011-07-24 10:48:21 +02004968 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004969 return -EINVAL;
4970
Dominik Dingela3a92c32014-12-01 17:24:42 +01004971 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
4972 return -EINVAL;
4973
Janosch Frank29b40f12019-09-30 04:19:18 -04004974 /* When we are protected, we should not change the memory slots */
4975 if (kvm_s390_pv_get_handle(kvm))
4976 return -EINVAL;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004977 return 0;
4978}
4979
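/*
 * Illustrative sketch (editor's addition; field values are assumptions):
 * the checks above require that a slot passed to
 * KVM_SET_USER_MEMORY_REGION has a 1MB-aligned userspace address, guest
 * address and size, and stays below kvm->arch.mem_limit:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,                  // 1MB aligned
 *		.memory_size     = 256 << 20,          // multiple of 1MB
 *		.userspace_addr  = (__u64)(uintptr_t)backing, // 1MB aligned
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */
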
4980void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004981 const struct kvm_userspace_memory_region *mem,
Sean Christopherson9d4c1972020-02-18 13:07:24 -08004982 struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02004983 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004984 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004985{
Christian Borntraeger19ec1662019-05-24 16:06:23 +02004986 int rc = 0;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004987
Christian Borntraeger19ec1662019-05-24 16:06:23 +02004988 switch (change) {
4989 case KVM_MR_DELETE:
4990 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4991 old->npages * PAGE_SIZE);
4992 break;
4993 case KVM_MR_MOVE:
4994 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4995 old->npages * PAGE_SIZE);
4996 if (rc)
4997 break;
Joe Perches3b684a42020-03-10 21:51:32 -07004998 fallthrough;
Christian Borntraeger19ec1662019-05-24 16:06:23 +02004999 case KVM_MR_CREATE:
5000 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
5001 mem->guest_phys_addr, mem->memory_size);
5002 break;
5003 case KVM_MR_FLAGS_ONLY:
5004 break;
5005 default:
5006 WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
5007 }
Carsten Otte598841c2011-07-24 10:48:21 +02005008 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02005009 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02005010 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005011}
5012
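/*
 * Editor's note on the commit handler above: KVM_MR_DELETE and
 * KVM_MR_MOVE tear down the old gmap segment; KVM_MR_MOVE then falls
 * through to KVM_MR_CREATE, which (re)establishes the guest-physical to
 * userspace mapping. KVM_MR_FLAGS_ONLY needs no gmap update.
 */
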
Alexander Yarygin60a37702016-04-01 15:38:57 +03005013static inline unsigned long nonhyp_mask(int i)
5014{
5015 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
5016
5017 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
5018}
5019
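/*
 * Worked example for nonhyp_mask() above (editor's addition): sclp.hmfai
 * packs one 2-bit code per facility word, and (sclp.hmfai << i * 2) >> 30
 * extracts the code for word i. The 48-bit base mask is then shifted
 * right by 16 bits per code step: code 0 yields 0x0000ffffffffffff,
 * code 1 0x00000000ffffffff, code 2 0x000000000000ffff, and code 3 0,
 * hiding progressively more facility bits from the host-provided list.
 */
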
Christian Borntraeger3491caf2016-05-13 12:16:35 +02005020void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
5021{
5022 vcpu->valid_wakeup = false;
5023}
5024
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005025static int __init kvm_s390_init(void)
5026{
Alexander Yarygin60a37702016-04-01 15:38:57 +03005027 int i;
5028
David Hildenbrand07197fd2015-01-30 16:01:38 +01005029 if (!sclp.has_sief2) {
Michael Mueller8d43d572018-12-10 11:15:16 +01005030 pr_info("SIE is not available\n");
David Hildenbrand07197fd2015-01-30 16:01:38 +01005031 return -ENODEV;
5032 }
5033
Janosch Franka4499382018-07-13 11:28:31 +01005034 if (nested && hpage) {
Michael Mueller8d43d572018-12-10 11:15:16 +01005035 pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
Janosch Franka4499382018-07-13 11:28:31 +01005036 return -EINVAL;
5037 }
5038
Alexander Yarygin60a37702016-04-01 15:38:57 +03005039 for (i = 0; i < 16; i++)
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00005040 kvm_s390_fac_base[i] |=
Alexander Yarygin60a37702016-04-01 15:38:57 +03005041 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
5042
Michael Mueller9d8d5782015-02-02 15:42:51 +01005043 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005044}
5045
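/*
 * Usage note (editor's illustration): nested (VSIE) and hpage (1MB
 * backing) are mutually exclusive module parameters, so e.g.
 *
 *	modprobe kvm nested=1 hpage=1
 *
 * fails the check in kvm_s390_init() above with -EINVAL.
 */
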
5046static void __exit kvm_s390_exit(void)
5047{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005048 kvm_exit();
5049}
5050
5051module_init(kvm_s390_init);
5052module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02005053
5054/*
5055 * Enable autoloading of the kvm module.
5056 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5057 * since x86 takes a different approach.
5058 */
5059#include <linux/miscdevice.h>
5060MODULE_ALIAS_MISCDEV(KVM_MINOR);
5061MODULE_ALIAS("devname:kvm");