// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>
#include <linux/pgtable.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include <asm/uv.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

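/* debugfs statistics: VCPU_STAT counters are per vcpu, VM_STAT per vm */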
struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT("userspace_handled", exit_userspace),
	VCPU_STAT("exit_null", exit_null),
	VCPU_STAT("exit_validity", exit_validity),
	VCPU_STAT("exit_stop_request", exit_stop_request),
	VCPU_STAT("exit_external_request", exit_external_request),
	VCPU_STAT("exit_io_request", exit_io_request),
	VCPU_STAT("exit_external_interrupt", exit_external_interrupt),
	VCPU_STAT("exit_instruction", exit_instruction),
	VCPU_STAT("exit_pei", exit_pei),
	VCPU_STAT("exit_program_interruption", exit_program_interruption),
	VCPU_STAT("exit_instr_and_program_int", exit_instr_and_program),
	VCPU_STAT("exit_operation_exception", exit_operation_exception),
	VCPU_STAT("halt_successful_poll", halt_successful_poll),
	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
	VCPU_STAT("halt_no_poll_steal", halt_no_poll_steal),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
	VCPU_STAT("instruction_lctlg", instruction_lctlg),
	VCPU_STAT("instruction_lctl", instruction_lctl),
	VCPU_STAT("instruction_stctl", instruction_stctl),
	VCPU_STAT("instruction_stctg", instruction_stctg),
	VCPU_STAT("deliver_ckc", deliver_ckc),
	VCPU_STAT("deliver_cputm", deliver_cputm),
	VCPU_STAT("deliver_emergency_signal", deliver_emergency_signal),
	VCPU_STAT("deliver_external_call", deliver_external_call),
	VCPU_STAT("deliver_service_signal", deliver_service_signal),
	VCPU_STAT("deliver_virtio", deliver_virtio),
	VCPU_STAT("deliver_stop_signal", deliver_stop_signal),
	VCPU_STAT("deliver_prefix_signal", deliver_prefix_signal),
	VCPU_STAT("deliver_restart_signal", deliver_restart_signal),
	VCPU_STAT("deliver_program", deliver_program),
	VCPU_STAT("deliver_io", deliver_io),
	VCPU_STAT("deliver_machine_check", deliver_machine_check),
	VCPU_STAT("exit_wait_state", exit_wait_state),
	VCPU_STAT("inject_ckc", inject_ckc),
	VCPU_STAT("inject_cputm", inject_cputm),
	VCPU_STAT("inject_external_call", inject_external_call),
	VM_STAT("inject_float_mchk", inject_float_mchk),
	VCPU_STAT("inject_emergency_signal", inject_emergency_signal),
	VM_STAT("inject_io", inject_io),
	VCPU_STAT("inject_mchk", inject_mchk),
	VM_STAT("inject_pfault_done", inject_pfault_done),
	VCPU_STAT("inject_program", inject_program),
	VCPU_STAT("inject_restart", inject_restart),
	VM_STAT("inject_service_signal", inject_service_signal),
	VCPU_STAT("inject_set_prefix", inject_set_prefix),
	VCPU_STAT("inject_stop_signal", inject_stop_signal),
	VCPU_STAT("inject_pfault_init", inject_pfault_init),
	VM_STAT("inject_virtio", inject_virtio),
	VCPU_STAT("instruction_epsw", instruction_epsw),
	VCPU_STAT("instruction_gs", instruction_gs),
	VCPU_STAT("instruction_io_other", instruction_io_other),
	VCPU_STAT("instruction_lpsw", instruction_lpsw),
	VCPU_STAT("instruction_lpswe", instruction_lpswe),
	VCPU_STAT("instruction_pfmf", instruction_pfmf),
	VCPU_STAT("instruction_ptff", instruction_ptff),
	VCPU_STAT("instruction_stidp", instruction_stidp),
	VCPU_STAT("instruction_sck", instruction_sck),
	VCPU_STAT("instruction_sckpf", instruction_sckpf),
	VCPU_STAT("instruction_spx", instruction_spx),
	VCPU_STAT("instruction_stpx", instruction_stpx),
	VCPU_STAT("instruction_stap", instruction_stap),
	VCPU_STAT("instruction_iske", instruction_iske),
	VCPU_STAT("instruction_ri", instruction_ri),
	VCPU_STAT("instruction_rrbe", instruction_rrbe),
	VCPU_STAT("instruction_sske", instruction_sske),
	VCPU_STAT("instruction_ipte_interlock", instruction_ipte_interlock),
	VCPU_STAT("instruction_essa", instruction_essa),
	VCPU_STAT("instruction_stsi", instruction_stsi),
	VCPU_STAT("instruction_stfl", instruction_stfl),
	VCPU_STAT("instruction_tb", instruction_tb),
	VCPU_STAT("instruction_tpi", instruction_tpi),
	VCPU_STAT("instruction_tprot", instruction_tprot),
	VCPU_STAT("instruction_tsch", instruction_tsch),
	VCPU_STAT("instruction_sthyi", instruction_sthyi),
	VCPU_STAT("instruction_sie", instruction_sie),
	VCPU_STAT("instruction_sigp_sense", instruction_sigp_sense),
	VCPU_STAT("instruction_sigp_sense_running", instruction_sigp_sense_running),
	VCPU_STAT("instruction_sigp_external_call", instruction_sigp_external_call),
	VCPU_STAT("instruction_sigp_emergency", instruction_sigp_emergency),
	VCPU_STAT("instruction_sigp_cond_emergency", instruction_sigp_cond_emergency),
	VCPU_STAT("instruction_sigp_start", instruction_sigp_start),
	VCPU_STAT("instruction_sigp_stop", instruction_sigp_stop),
	VCPU_STAT("instruction_sigp_stop_store_status", instruction_sigp_stop_store_status),
	VCPU_STAT("instruction_sigp_store_status", instruction_sigp_store_status),
	VCPU_STAT("instruction_sigp_store_adtl_status", instruction_sigp_store_adtl_status),
	VCPU_STAT("instruction_sigp_set_arch", instruction_sigp_arch),
	VCPU_STAT("instruction_sigp_set_prefix", instruction_sigp_prefix),
	VCPU_STAT("instruction_sigp_restart", instruction_sigp_restart),
	VCPU_STAT("instruction_sigp_cpu_reset", instruction_sigp_cpu_reset),
	VCPU_STAT("instruction_sigp_init_cpu_reset", instruction_sigp_init_cpu_reset),
	VCPU_STAT("instruction_sigp_unknown", instruction_sigp_unknown),
	VCPU_STAT("instruction_diag_10", diagnose_10),
	VCPU_STAT("instruction_diag_44", diagnose_44),
	VCPU_STAT("instruction_diag_9c", diagnose_9c),
	VCPU_STAT("diag_9c_ignored", diagnose_9c_ignored),
	VCPU_STAT("instruction_diag_258", diagnose_258),
	VCPU_STAT("instruction_diag_308", diagnose_308),
	VCPU_STAT("instruction_diag_500", diagnose_500),
	VCPU_STAT("instruction_diag_other", diagnose_other),
	{ NULL }
};

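/*
 * Layout of the extended TOD-clock value as stored by STORE CLOCK
 * EXTENDED: the epoch index byte followed by the 64 bits of the TOD.
 */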
struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* allow 1m huge page guest backing, if !nested */
static int hpage;
module_param(hpage, int, 0444);
MODULE_PARM_DESC(hpage, "1m huge page backing support");

/* maximum percentage of steal time for polling. >100 is treated like 100 */
static u8 halt_poll_max_steal = 10;
module_param(halt_poll_max_steal, byte, 0644);
MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");

/* if set to true, the GISA will be initialized and used if available */
static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;
debug_info_t *kvm_s390_dbf_uv;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}

/* forward declarations */
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
static int sca_switch_to_extended(struct kvm *kvm);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

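	/*
	 * The epoch index and the epoch together form a 128-bit signed
	 * value, so adding the 64-bit delta below must propagate the
	 * prepared sign extension plus a possible carry into epdx.
	 */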
	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void *opaque)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

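/*
 * PERFORM LOCKED OPERATION acts as a "test bit" when bit 0x100 is set
 * in the function code: only the condition code is set, telling us
 * whether function code @nr is installed.
 */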
static inline int plo_test_bit(unsigned char nr)
{
	unsigned long function = (unsigned long)nr | 0x100;
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}

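/*
 * Execute the query function (function code 0 in GR0) of the
 * instruction given by @opcode and store the returned availability
 * bitmap at the address passed in GR1.
 */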
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
	asm volatile(
		"	lghi	0,0\n"
		"	lgr	1,%[query]\n"
		/* Parameter registers are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		:
		: [query] "d" ((unsigned long)query), [opc] "i" (opcode)
		: "cc", "memory", "0", "1");
}

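/* opcodes of the SORT LISTS and DEFLATE CONVERSION CALL instructions */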
#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * resident pages be detected as preserved.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc = -ENOMEM;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf_uv)
		goto out;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
	    debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
		goto out;

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("A FLIC registration call failed with rc=%d\n", rc);
		goto out;
	}

	rc = kvm_s390_gib_init(GAL_ISC);
	if (rc)
		goto out;

	return 0;

out:
	kvm_arch_exit();
	return rc;
}

void kvm_arch_exit(void)
{
	kvm_s390_gib_destroy();
	debug_unregister(kvm_s390_dbf);
	debug_unregister(kvm_s390_dbf_uv);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
	case KVM_CAP_S390_VCPU_RESETS:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_S390_DIAG318:
		r = 1;
		break;
	case KVM_CAP_S390_HPAGE_1M:
		r = 0;
		if (hpage && !kvm_is_ucontrol(kvm))
			r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	case KVM_CAP_S390_PROTECTED:
		r = is_prot_virt_host();
		break;
	default:
		r = 0;
	}
	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	int i;
	gfn_t cur_gfn, last_gfn;
	unsigned long gaddr, vmaddr;
	struct gmap *gmap = kvm->arch.gmap;
	DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);

	/* Loop over all guest segments */
	cur_gfn = memslot->base_gfn;
	last_gfn = memslot->base_gfn + memslot->npages;
	for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
		gaddr = gfn_to_gpa(cur_gfn);
		vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
		if (kvm_is_error_hva(vmaddr))
			continue;

		bitmap_zero(bitmap, _PAGE_ENTRIES);
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		for (i = 0; i < _PAGE_ENTRIES; i++) {
			if (test_bit(i, bitmap))
				mark_page_dirty(kvm, cur_gfn + i);
		}

		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

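/* make all vcpus intercept operation exceptions, see KVM_CAP_S390_USER_INSTR0 */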
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_HPAGE_1M:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			r = -EBUSY;
		else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
			r = -EINVAL;
		else {
			r = 0;
			mmap_write_lock(kvm->mm);
			kvm->mm->context.allow_gmap_hpage_1m = 1;
			mmap_write_unlock(kvm->mm);
			/*
			 * We might have to create fake 4k page
			 * tables. To avoid that the hardware works on
			 * stale PGSTEs, we emulate these instructions.
			 */
			kvm->arch.use_skf = 0;
			kvm->arch.use_pfmfi = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus)
			ret = -EBUSY;
		else if (kvm->mm->context.allow_gmap_hpage_1m)
			ret = -EINVAL;
		else {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_memory_slot *ms;
	struct kvm_memslots *slots;
	unsigned long ram_pages = 0;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_mode)
		return 0;
	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

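	/* without CMMA there is no storage-attribute state to transfer */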
	if (!kvm->arch.use_cmma) {
		kvm->arch.migration_mode = 1;
		return 0;
	}
	/* mark all the pages in active slots as dirty */
	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
		ms = slots->memslots + slotnr;
		if (!ms->dirty_bitmap)
			return -EINVAL;
		/*
		 * The second half of the bitmap is only used on x86,
		 * and would be wasted otherwise, so we put it to good
		 * use here to keep track of the state of the storage
		 * attributes.
		 */
		memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
		ram_pages += ms->npages;
	}
	atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
	kvm->arch.migration_mode = 1;
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	/* migration mode already disabled */
	if (!kvm->arch.migration_mode)
		return 0;
	kvm->arch.migration_mode = 0;
	if (kvm->arch.use_cmma)
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = kvm->arch.migration_mode;

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

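/*
 * Compute the guest view of the TOD clock: the host TOD plus the guest
 * epoch, carrying into the epoch index when the addition wraps.
 */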
static void kvm_s390_get_tod_clock(struct kvm *kvm,
				   struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = 0;
	if (test_kvm_facility(kvm, 139)) {
		gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
		if (gtod->tod < htod.tod)
			gtod->epoch_idx += 1;
	}

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));
	kvm_s390_get_tod_clock(kvm, &gtod);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

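/*
 * Set the guest CPU model (cpuid, IBC, facility list). The requested IBC
 * is clamped to the host range reported by the SCLP; as a hypothetical
 * example, with a host range of 0x100-0x123 a request of 0x200 would be
 * lowered to 0x123 and a request of 0x050 raised to 0x100.
 */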
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

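/*
 * Replace the guest's instruction subfunction (query) results wholesale.
 * Like the other CPU model setters, this is only allowed before the first
 * vcpu exists; afterwards it fails with -EBUSY.
 */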
static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		case KVM_S390_VM_CRYPTO_DISABLE_APIE:
			ret = ap_instructions_available() ? 0 : -ENXIO;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

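/*
 * Read out the guest storage keys for a range of guest frames.
 * Illustrative userspace invocation (sketch only, error handling and
 * buffer sizing omitted):
 *
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = 128,
 *		.skeydata_addr = (__u64)(unsigned long)buf,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *
 * A return value of KVM_S390_GET_SKEYS_NONE indicates that the guest is
 * not using storage keys at all.
 */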
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_uses_skeys(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;
	bool unlocked;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	i = 0;
	mmap_read_lock(current->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < args->count) {
		unlocked = false;
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r) {
			r = fixup_user_fault(current->mm, hva,
					     FAULT_FLAG_WRITE, &unlocked);
			if (r)
				break;
		}
		if (!r)
			i++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(current->mm);
out:
	kvfree(keys);
	return r;
}

/*
 * Base address and length must be sent at the start of each block;
 * therefore it's cheaper to send some clean data, as long as it's less
 * than the size of two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)

/*
 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
 * address falls in a hole. In that case the index of one of the memslots
 * bordering the hole is returned.
 */
static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return slot;

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (start >= slots->used_slots)
		return slots->used_slots - 1;

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
	}

	return start;
}

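/*
 * Peek mode: report the current CMMA values for a linear range of guest
 * frames without touching the dirty bitmap, i.e. without consuming any
 * migration state. Only the CMMA-relevant PGSTE bits (the 0x43 mask
 * below) are copied out.
 */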
static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			      u8 *res, unsigned long bufsize)
{
	unsigned long pgstev, hva, cur_gfn = args->start_gfn;

	args->count = 0;
	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		/*
		 * We return an error if the first value was invalid, but we
		 * return successfully if at least one value was copied.
		 */
		if (kvm_is_error_hva(hva))
			return args->count ? 0 : -EFAULT;
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		res[args->count++] = (pgstev >> 24) & 0x43;
		cur_gfn++;
	}

	return 0;
}

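/*
 * Find the guest frame number of the next dirty CMMA bit at or after
 * cur_gfn. The memslot array is sorted by descending base_gfn, so moving
 * to a lower slot index means moving towards higher guest addresses;
 * past the topmost slot the search wraps around to the end of the array.
 */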
static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
					      unsigned long cur_gfn)
{
	int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
	struct kvm_memory_slot *ms = slots->memslots + slotidx;
	unsigned long ofs = cur_gfn - ms->base_gfn;

	if (ms->base_gfn + ms->npages <= cur_gfn) {
		slotidx--;
		/* If we are above the highest slot, wrap around */
		if (slotidx < 0)
			slotidx = slots->used_slots - 1;

		ms = slots->memslots + slotidx;
		ofs = 0;
	}
	ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
	while ((slotidx > 0) && (ofs >= ms->npages)) {
		slotidx--;
		ms = slots->memslots + slotidx;
		ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
	}
	return ms->base_gfn + ofs;
}

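/*
 * Harvest dirty CMMA values: starting with the first dirty bit at or
 * after args->start_gfn, copy values (clearing dirty bits as we go)
 * until the buffer is full, memory ends, or the gap to the next dirty
 * bit exceeds KVM_S390_MAX_BIT_DISTANCE; beyond that distance it is
 * cheaper to start a new block than to pad with clean values.
 */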
static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
			     u8 *res, unsigned long bufsize)
{
	unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *ms;

	if (unlikely(!slots->used_slots))
		return 0;

	cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
	ms = gfn_to_memslot(kvm, cur_gfn);
	args->count = 0;
	args->start_gfn = cur_gfn;
	if (!ms)
		return 0;
	next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
	mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;

	while (args->count < bufsize) {
		hva = gfn_to_hva(kvm, cur_gfn);
		if (kvm_is_error_hva(hva))
			return 0;
		/* Decrement only if we actually flipped the bit to 0 */
		if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_dec(&kvm->arch.cmma_dirty_pages);
		if (get_pgste(kvm->mm, hva, &pgstev) < 0)
			pgstev = 0;
		/* Save the value */
		res[args->count++] = (pgstev >> 24) & 0x43;
		/* If the next bit is too far away, stop. */
		if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
			return 0;
		/* If we reached the previous "next", find the next one */
		if (cur_gfn == next_gfn)
			next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
		/* Reached the end of memory or of the buffer, stop */
		if ((next_gfn >= mem_end) ||
		    (next_gfn - args->start_gfn >= bufsize))
			return 0;
		cur_gfn++;
		/* Reached the end of the current memslot, take the next one. */
		if (cur_gfn - ms->base_gfn >= ms->npages) {
			ms = gfn_to_memslot(kvm, cur_gfn);
			if (!ms)
				return 0;
		}
	}
	return 0;
}

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	unsigned long bufsize;
	int srcu_idx, peek, ret;
	u8 *values;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !kvm->arch.migration_mode)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.uses_cmm) {
		memset(args, 0, sizeof(*args));
		return 0;
	}
	/* We are not peeking, and there are no dirty pages */
	if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	values = vmalloc(bufsize);
	if (!values)
		return -ENOMEM;

	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	if (peek)
		ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
	else
		ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (kvm->arch.migration_mode)
		args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
	else
		args->remaining = 0;

	if (copy_to_user((void __user *)args->values, values, args->count))
		ret = -EFAULT;

	vfree(values);
	return ret;
}

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken; otherwise the attributes are
 * set and the mm->context.uses_cmm flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(array_size(sizeof(*bits), args->count));
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	mmap_read_lock(kvm->mm);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	mmap_read_unlock(kvm->mm);

	if (!kvm->mm->context.uses_cmm) {
		mmap_write_lock(kvm->mm);
		kvm->mm->context.uses_cmm = 1;
		mmap_write_unlock(kvm->mm);
	}
out:
	vfree(bits);
	return r;
}

static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
{
	struct kvm_vcpu *vcpu;
	u16 rc, rrc;
	int ret = 0;
	int i;

	/*
	 * We ignore failures and try to destroy as many CPUs as possible.
	 * At the same time we must not free the assigned resources when
	 * this fails, as the ultravisor still has access to that memory.
	 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
	 * behind.
	 * We want to return the first failure rc and rrc, though.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
			*rcp = rc;
			*rrcp = rrc;
			ret = -EIO;
		}
		mutex_unlock(&vcpu->mutex);
	}
	return ret;
}

static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int i, r = 0;
	u16 dummy;

	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_lock(&vcpu->mutex);
		r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
		mutex_unlock(&vcpu->mutex);
		if (r)
			break;
	}
	if (r)
		kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
	return r;
}

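/*
 * Dispatcher for the KVM_PV_* subcommands that move a VM into and out of
 * protected virtualization (Ultravisor) mode. The usual userspace-driven
 * sequence is, as a sketch assuming no failures:
 *
 *	KVM_PV_ENABLE		create the secure config and secure CPUs
 *	KVM_PV_SET_SEC_PARMS	pass the header of the encrypted image
 *	KVM_PV_UNPACK		decrypt and import the image pages
 *	KVM_PV_VERIFY		have the Ultravisor verify the image
 *
 * KVM_PV_DISABLE tears the secure configuration down again.
 */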
static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
{
	int r = 0;
	u16 dummy;
	void __user *argp = (void __user *)cmd->data;

	switch (cmd->cmd) {
	case KVM_PV_ENABLE: {
		r = -EINVAL;
		if (kvm_s390_pv_is_protected(kvm))
			break;

		/*
		 * FMT 4 SIE needs esca. As we never switch back to bsca
		 * from esca, we need no cleanup in the error cases below.
		 */
		r = sca_switch_to_extended(kvm);
		if (r)
			break;

		mmap_write_lock(current->mm);
		r = gmap_mark_unmergeable();
		mmap_write_unlock(current->mm);
		if (r)
			break;

		r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			break;

		r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
		if (r)
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);

		/* we need to block service interrupts from now on */
		set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_DISABLE: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
		/*
		 * If a CPU could not be destroyed, destroy VM will also fail.
		 * There is no point in trying to destroy it. Instead return
		 * the rc and rrc from the first CPU that failed destroying.
		 */
		if (r)
			break;
		r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);

		/* no need to block service interrupts any more */
		clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
		break;
	}
	case KVM_PV_SET_SEC_PARMS: {
		struct kvm_s390_pv_sec_parm parms = {};
		void *hdr;

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = -EFAULT;
		if (copy_from_user(&parms, argp, sizeof(parms)))
			break;

		/* Currently restricted to 8KB */
		r = -EINVAL;
		if (parms.length > PAGE_SIZE * 2)
			break;

		r = -ENOMEM;
		hdr = vmalloc(parms.length);
		if (!hdr)
			break;

		r = -EFAULT;
		if (!copy_from_user(hdr, (void __user *)parms.origin,
				    parms.length))
			r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
						      &cmd->rc, &cmd->rrc);

		vfree(hdr);
		break;
	}
	case KVM_PV_UNPACK: {
		struct kvm_s390_pv_unp unp = {};

		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
			break;

		r = -EFAULT;
		if (copy_from_user(&unp, argp, sizeof(unp)))
			break;

		r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
				       &cmd->rc, &cmd->rrc);
		break;
	}
	case KVM_PV_VERIFY: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
			     cmd->rrc);
		break;
	}
	case KVM_PV_PREP_RESET: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		break;
	}
	case KVM_PV_UNSHARE_ALL: {
		r = -EINVAL;
		if (!kvm_s390_pv_is_protected(kvm))
			break;

		r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
				  UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
		KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
			     cmd->rc, cmd->rrc);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

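/*
 * Most of the VM attribute plumbing above is reached via the generic
 * KVM_{SET,GET,HAS}_DEVICE_ATTR ioctls handled below. Illustrative
 * userspace call (sketch only; tod is a hypothetical u64 variable):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *		.addr  = (__u64)(unsigned long)&tod,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */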
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_get_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		mutex_lock(&kvm->slots_lock);
		r = kvm_s390_set_cmma_bits(kvm, &args);
		mutex_unlock(&kvm->slots_lock);
		break;
	}
	case KVM_S390_PV_COMMAND: {
		struct kvm_pv_cmd args;

		/* protvirt means user sigp */
		kvm->arch.user_cpu_state_ctrl = 1;
		r = 0;
		if (!is_prot_virt_host()) {
			r = -EINVAL;
			break;
		}
		if (copy_from_user(&args, argp, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		if (args.flags) {
			r = -EINVAL;
			break;
		}
		mutex_lock(&kvm->lock);
		r = kvm_s390_handle_pv(kvm, &args);
		mutex_unlock(&kvm->lock);
		if (copy_to_user(argp, &args, sizeof(args))) {
			r = -EFAULT;
			break;
		}
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_apxa_installed(void)
{
	struct ap_config_info info;

	if (ap_instructions_available()) {
		if (ap_qci(&info) == 0)
			return info.apxa;
	}

	return 0;
}

/*
 * The format of the crypto control block (CRYCB) is specified in the 3 low
 * order bits of the CRYCB designation (CRYCBD) field as follows:
 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
 *	     AP extended addressing (APXA) facility is installed.
 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
 * Format 2: Both the APXA and MSAX3 facilities are installed.
 */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	/* Clear the CRYCB format bits - i.e., set format 0 by default */
	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);

	/* Check whether MSAX3 is installed */
	if (!test_kvm_facility(kvm, 76))
		return;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

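/*
 * Install the AP matrix masks (apm: adapters, aqm: usage domains,
 * adm: control domains) into the CRYCB while all vcpus are blocked.
 * Format-2 CRYCBs use the 256-bit masks of APCB1, the older formats the
 * shorter APCB0 masks; the vSIE shadow crycbs are recreated afterwards
 * via KVM_REQ_VSIE_RESTART.
 */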
void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
			       unsigned long *aqm, unsigned long *adm)
{
	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;

	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
		memcpy(crycb->apcb1.apm, apm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
			 apm[0], apm[1], apm[2], apm[3]);
		memcpy(crycb->apcb1.aqm, aqm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
			 aqm[0], aqm[1], aqm[2], aqm[3]);
		memcpy(crycb->apcb1.adm, adm, 32);
		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
			 adm[0], adm[1], adm[2], adm[3]);
		break;
	case CRYCB_FORMAT1:
	case CRYCB_FORMAT0: /* fall through - both use APCB0 */
		memcpy(crycb->apcb0.apm, apm, 8);
		memcpy(crycb->apcb0.aqm, aqm, 2);
		memcpy(crycb->apcb0.adm, adm, 2);
		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
			 apm[0], *((unsigned short *)aqm),
			 *((unsigned short *)adm));
		break;
	default: /* Cannot happen */
		break;
	}

	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);

void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
	mutex_lock(&kvm->lock);
	kvm_s390_vcpu_block_all(kvm);

	memset(&kvm->arch.crypto.crycb->apcb0, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb0));
	memset(&kvm->arch.crypto.crycb->apcb1, 0,
	       sizeof(kvm->arch.crypto.crycb->apcb1));

	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
	/* recreate the shadow crycb for each vcpu */
	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
}
EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);

David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002591static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002592{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002593 struct cpuid cpuid;
2594
2595 get_cpu_id(&cpuid);
2596 cpuid.version = 0xff;
2597 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002598}
2599
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002600static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002601{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002602 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002603 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002604
Tony Krowiake585b242018-09-25 19:16:18 -04002605 if (!test_kvm_facility(kvm, 76))
2606 return;
2607
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002608 /* Enable AES/DEA protected key functions by default */
2609 kvm->arch.crypto.aes_kw = 1;
2610 kvm->arch.crypto.dea_kw = 1;
2611 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2612 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2613 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2614 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002615}
2616
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002617static void sca_dispose(struct kvm *kvm)
2618{
2619 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002620 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002621 else
2622 free_page((unsigned long)(kvm->arch.sca));
2623 kvm->arch.sca = NULL;
2624}
2625
Carsten Ottee08b9632012-01-04 10:25:20 +01002626int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002627{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002628 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002629 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002630 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002631 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002632
Carsten Ottee08b9632012-01-04 10:25:20 +01002633 rc = -EINVAL;
2634#ifdef CONFIG_KVM_S390_UCONTROL
2635 if (type & ~KVM_VM_S390_UCONTROL)
2636 goto out_err;
2637 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2638 goto out_err;
2639#else
2640 if (type)
2641 goto out_err;
2642#endif
2643
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002644 rc = s390_enable_sie();
2645 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002646 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002647
Carsten Otteb2904112011-10-18 12:27:13 +02002648 rc = -ENOMEM;
2649
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002650 if (!sclp.has_64bscao)
2651 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002652 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002653 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002654 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002655 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002656 goto out_err;
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002657 mutex_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002658 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002659 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002660 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002661 kvm->arch.sca = (struct bsca_block *)
2662 ((char *) kvm->arch.sca + sca_offset);
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002663 mutex_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002664
2665 sprintf(debug_name, "kvm-%u", current->pid);
2666
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002667 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002668 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002669 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002670
Michael Mueller19114be2017-05-30 14:26:02 +02002671 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002672 kvm->arch.sie_page2 =
2673 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2674 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002675 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002676
Michael Mueller25c84db2019-01-31 09:52:41 +01002677 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002678 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002679
2680 for (i = 0; i < kvm_s390_fac_size(); i++) {
2681 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2682 (kvm_s390_fac_base[i] |
2683 kvm_s390_fac_ext[i]);
2684 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2685 kvm_s390_fac_base[i];
2686 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002687 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002688
David Hildenbrand19352222017-08-29 16:31:08 +02002689	/* we are always in czam mode - even on pre-z14 machines */
2690 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2691 set_kvm_facility(kvm->arch.model.fac_list, 138);
2692 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002693 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2694 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002695 if (MACHINE_HAS_TLB_GUEST) {
2696 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2697 set_kvm_facility(kvm->arch.model.fac_list, 147);
2698 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002699
Pierre Morel05f31e32019-05-21 17:34:37 +02002700 if (css_general_characteristics.aiv && test_facility(65))
2701 set_kvm_facility(kvm->arch.model.fac_mask, 65);
2702
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002703 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002704 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002705
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002706 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002707
Fei Li51978392017-02-17 17:06:26 +08002708 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002709 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002710 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2711 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002712 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002713 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002714
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002715 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002716 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002717
Carsten Ottee08b9632012-01-04 10:25:20 +01002718 if (type & KVM_VM_S390_UCONTROL) {
2719 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002720 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002721 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002722 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002723 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002724 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002725 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002726 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002727 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002728 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002729 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002730 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002731 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002732 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002733
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002734 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002735 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002736 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002737 kvm_s390_vsie_init(kvm);
Michael Muellercc674ef2020-02-27 10:10:31 +01002738 if (use_gisa)
2739 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002740 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002741
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002742 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002743out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002744 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002745 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002746 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002747 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002748 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002749}
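/*
 * Userspace counterpart, for illustration (assumes an open /dev/kvm fd):
 * the 'type' argument above is the value passed to KVM_CREATE_VM - 0 for a
 * regular VM, KVM_VM_S390_UCONTROL (CAP_SYS_ADMIN required) for a
 * user-controlled VM without a kernel-managed gmap:
 *
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 */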
2750
Christian Borntraegerd329c032008-11-26 14:50:27 +01002751void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2752{
Janosch Frank29b40f12019-09-30 04:19:18 -04002753 u16 rc, rrc;
2754
Christian Borntraegerd329c032008-11-26 14:50:27 +01002755 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002756 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002757 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002758 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002759 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002760 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002761
2762 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002763 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002764
Dominik Dingele6db1d62015-05-07 15:41:57 +02002765 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002766 kvm_s390_vcpu_unsetup_cmma(vcpu);
Janosch Frank29b40f12019-09-30 04:19:18 -04002767	/* We cannot hold the vcpu mutex here, we are already dying */
2768 if (kvm_s390_pv_cpu_get_handle(vcpu))
2769 kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002770 free_page((unsigned long)(vcpu->arch.sie_block));
Christian Borntraegerd329c032008-11-26 14:50:27 +01002771}
2772
2773static void kvm_free_vcpus(struct kvm *kvm)
2774{
2775 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002776 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002777
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002778 kvm_for_each_vcpu(i, vcpu, kvm)
Sean Christopherson4543bdc2019-12-18 13:55:14 -08002779 kvm_vcpu_destroy(vcpu);
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002780
2781 mutex_lock(&kvm->lock);
2782 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2783 kvm->vcpus[i] = NULL;
2784
2785 atomic_set(&kvm->online_vcpus, 0);
2786 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002787}
2788
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002789void kvm_arch_destroy_vm(struct kvm *kvm)
2790{
Janosch Frank29b40f12019-09-30 04:19:18 -04002791 u16 rc, rrc;
2792
Christian Borntraegerd329c032008-11-26 14:50:27 +01002793 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002794 sca_dispose(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002795 kvm_s390_gisa_destroy(kvm);
Janosch Frank29b40f12019-09-30 04:19:18 -04002796 /*
2797 * We are already at the end of life and kvm->lock is not taken.
2798 * This is ok as the file descriptor is closed by now and nobody
2799 * can mess with the pv state. To avoid lockdep_assert_held from
2800 * complaining we do not use kvm_s390_pv_is_protected.
2801 */
2802 if (kvm_s390_pv_get_handle(kvm))
2803 kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
2804 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002805 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002806 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002807 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002808 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002809 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002810 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002811 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002812}
2813
2814/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002815static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2816{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002817 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002818 if (!vcpu->arch.gmap)
2819 return -ENOMEM;
2820 vcpu->arch.gmap->private = vcpu->kvm;
2821
2822 return 0;
2823}
2824
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002825static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2826{
David Hildenbranda6940672016-08-08 22:39:32 +02002827 if (!kvm_s390_use_sca_entries())
2828 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002829 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002830 if (vcpu->kvm->arch.use_esca) {
2831 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002832
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002833 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002834 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002835 } else {
2836 struct bsca_block *sca = vcpu->kvm->arch.sca;
2837
2838 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002839 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002840 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002841 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002842}
2843
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002844static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002845{
David Hildenbranda6940672016-08-08 22:39:32 +02002846 if (!kvm_s390_use_sca_entries()) {
2847 struct bsca_block *sca = vcpu->kvm->arch.sca;
2848
2849 /* we still need the basic sca for the ipte control */
2850 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2851 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002852 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002853 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002854 read_lock(&vcpu->kvm->arch.sca_lock);
2855 if (vcpu->kvm->arch.use_esca) {
2856 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002857
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002858 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002859 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2860 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002861 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002862 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002863 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002864 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002865
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002866 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002867 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2868 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002869 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002870 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002871 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002872}
2873
2874/* Basic SCA to Extended SCA data copy routines */
2875static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2876{
2877 d->sda = s->sda;
2878 d->sigp_ctrl.c = s->sigp_ctrl.c;
2879 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2880}
2881
2882static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2883{
2884 int i;
2885
2886 d->ipte_control = s->ipte_control;
2887 d->mcn[0] = s->mcn;
2888 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2889 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2890}
2891
2892static int sca_switch_to_extended(struct kvm *kvm)
2893{
2894 struct bsca_block *old_sca = kvm->arch.sca;
2895 struct esca_block *new_sca;
2896 struct kvm_vcpu *vcpu;
2897 unsigned int vcpu_idx;
2898 u32 scaol, scaoh;
2899
Janosch Frank29b40f12019-09-30 04:19:18 -04002900 if (kvm->arch.use_esca)
2901 return 0;
2902
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002903 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2904 if (!new_sca)
2905 return -ENOMEM;
2906
2907 scaoh = (u32)((u64)(new_sca) >> 32);
2908 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2909
2910 kvm_s390_vcpu_block_all(kvm);
2911 write_lock(&kvm->arch.sca_lock);
2912
2913 sca_copy_b_to_e(new_sca, old_sca);
2914
2915 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2916 vcpu->arch.sie_block->scaoh = scaoh;
2917 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002918 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002919 }
2920 kvm->arch.sca = new_sca;
2921 kvm->arch.use_esca = 1;
2922
2923 write_unlock(&kvm->arch.sca_lock);
2924 kvm_s390_vcpu_unblock_all(kvm);
2925
2926 free_page((unsigned long)old_sca);
2927
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002928 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2929 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002930 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002931}
2932
2933static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2934{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002935 int rc;
2936
David Hildenbranda6940672016-08-08 22:39:32 +02002937 if (!kvm_s390_use_sca_entries()) {
2938 if (id < KVM_MAX_VCPUS)
2939 return true;
2940 return false;
2941 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002942 if (id < KVM_S390_BSCA_CPU_SLOTS)
2943 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002944 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002945 return false;
2946
2947 mutex_lock(&kvm->lock);
2948 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2949 mutex_unlock(&kvm->lock);
2950
2951 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002952}
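/*
 * Flow of sca_can_add_vcpu(), with the slot counts spelled out as an
 * assumption about the current header values: ids below
 * KVM_S390_BSCA_CPU_SLOTS (64) always fit the basic SCA; the first larger
 * id triggers a one-time sca_switch_to_extended(), after which up to
 * KVM_S390_ESCA_CPU_SLOTS (248) vcpus fit - provided sclp reports both
 * ESCA and 64-bit SCAO support.
 */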
2953
David Hildenbranddb0758b2016-02-15 09:42:25 +01002954/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2955static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2956{
2957 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002958 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002959 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002960 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002961}
2962
2963/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2964static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2965{
2966 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002967 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002968 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2969 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002970 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002971}
2972
2973/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2974static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2975{
2976 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2977 vcpu->arch.cputm_enabled = true;
2978 __start_cpu_timer_accounting(vcpu);
2979}
2980
2981/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2982static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2983{
2984 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2985 __stop_cpu_timer_accounting(vcpu);
2986 vcpu->arch.cputm_enabled = false;
2987}
2988
2989static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2990{
2991 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2992 __enable_cpu_timer_accounting(vcpu);
2993 preempt_enable();
2994}
2995
2996static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2997{
2998 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2999 __disable_cpu_timer_accounting(vcpu);
3000 preempt_enable();
3001}
3002
David Hildenbrand4287f242016-02-15 09:40:12 +01003003/* set the cpu timer - may only be called from the VCPU thread itself */
3004void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3005{
David Hildenbranddb0758b2016-02-15 09:42:25 +01003006 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01003007 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003008 if (vcpu->arch.cputm_enabled)
3009 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01003010 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003011 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003012 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01003013}
3014
David Hildenbranddb0758b2016-02-15 09:42:25 +01003015/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01003016__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3017{
David Hildenbrand9c23a132016-02-17 21:53:33 +01003018 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01003019 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01003020
3021 if (unlikely(!vcpu->arch.cputm_enabled))
3022 return vcpu->arch.sie_block->cputm;
3023
David Hildenbrand9c23a132016-02-17 21:53:33 +01003024 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3025 do {
3026 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3027 /*
3028 * If the writer would ever execute a read in the critical
3029 * section, e.g. in irq context, we have a deadlock.
3030 */
3031 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3032 value = vcpu->arch.sie_block->cputm;
3033 /* if cputm_start is 0, accounting is being started/stopped */
3034 if (likely(vcpu->arch.cputm_start))
3035 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3036 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3037 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003038 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01003039}
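/*
 * The loop above is the generic seqcount read pattern - a sketch with
 * stand-in names sc/shared:
 *
 *	unsigned int seq;
 *	__u64 snapshot;
 *
 *	do {
 *		seq = raw_read_seqcount(&sc);	// may be odd while a writer runs
 *		snapshot = shared;
 *	} while (read_seqcount_retry(&sc, seq & ~1));
 *
 * Clearing the low bit makes an in-flight writer (odd sequence) always
 * force another iteration rather than pass as a stable snapshot.
 */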
3040
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003041void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3042{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003043
David Hildenbrand37d9df92015-03-11 16:47:33 +01003044 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003045 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01003046 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003047 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01003048 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003049}
3050
3051void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3052{
David Hildenbrand01a745a2016-02-12 20:41:56 +01003053 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01003054 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003055 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003056 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01003057 vcpu->arch.enabled_gmap = gmap_get_enabled();
3058 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003059
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003060}
3061
Dominik Dingel31928aa2014-12-04 15:47:07 +01003062void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003063{
Jason J. Herne72f25022014-11-25 09:46:02 -05003064 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02003065 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003066 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01003067 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02003068 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003069 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02003070 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01003071 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02003072 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02003073 }
David Hildenbrand6502a342016-06-21 14:19:51 +02003074 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3075 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01003076 /* make vcpu_load load the right gmap on the first trigger */
3077 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003078}
3079
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003080static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3081{
3082 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3083 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3084 return true;
3085 return false;
3086}
3087
3088static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3089{
3090 /* At least one ECC subfunction must be present */
3091 return kvm_has_pckmo_subfunc(kvm, 32) ||
3092 kvm_has_pckmo_subfunc(kvm, 33) ||
3093 kvm_has_pckmo_subfunc(kvm, 34) ||
3094 kvm_has_pckmo_subfunc(kvm, 40) ||
3095 kvm_has_pckmo_subfunc(kvm, 41);
3096
3097}
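/*
 * Function-code assignment, stated as an assumption rather than taken from
 * this file: PCKMO subfunction bits 32-34 are the encrypted-ECC-key
 * functions for P256/P384/P521, and bits 40 and 41 those for
 * Ed25519/Ed448.
 */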
3098
Tony Krowiak5102ee82014-06-27 14:46:01 -04003099static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3100{
Tony Krowiake585b242018-09-25 19:16:18 -04003101 /*
3102 * If the AP instructions are not being interpreted and the MSAX3
3103 * facility is not configured for the guest, there is nothing to set up.
3104 */
3105 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04003106 return;
3107
Tony Krowiake585b242018-09-25 19:16:18 -04003108 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02003109 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04003110 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003111 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
Tony Krowiaka374e892014-09-03 10:13:53 +02003112
Tony Krowiake585b242018-09-25 19:16:18 -04003113 if (vcpu->kvm->arch.crypto.apie)
3114 vcpu->arch.sie_block->eca |= ECA_APIE;
3115
3116 /* Set up protected key support */
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003117 if (vcpu->kvm->arch.crypto.aes_kw) {
Tony Krowiaka374e892014-09-03 10:13:53 +02003118 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003119		/* ECC keys are also wrapped with the AES key */
3120 if (kvm_has_pckmo_ecc(vcpu->kvm))
3121 vcpu->arch.sie_block->ecd |= ECD_ECC;
3122 }
3123
Tony Krowiaka374e892014-09-03 10:13:53 +02003124 if (vcpu->kvm->arch.crypto.dea_kw)
3125 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04003126}
3127
Dominik Dingelb31605c2014-03-25 13:47:11 +01003128void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3129{
3130 free_page(vcpu->arch.sie_block->cbrlo);
3131 vcpu->arch.sie_block->cbrlo = 0;
3132}
3133
3134int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3135{
3136 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
3137 if (!vcpu->arch.sie_block->cbrlo)
3138 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01003139 return 0;
3140}
3141
Michael Mueller91520f12015-02-27 14:32:11 +01003142static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3143{
3144 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3145
Michael Mueller91520f12015-02-27 14:32:11 +01003146 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01003147 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01003148 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01003149}
3150
Sean Christophersonff72bb52019-12-18 13:55:20 -08003151static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3152{
Dominik Dingelb31605c2014-03-25 13:47:11 +01003153 int rc = 0;
Janosch Frank29b40f12019-09-30 04:19:18 -04003154 u16 uvrc, uvrrc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003155
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01003156 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3157 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003158 CPUSTAT_STOPPED);
3159
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003160 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003161 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003162 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003163 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003164
Michael Mueller91520f12015-02-27 14:32:11 +01003165 kvm_s390_vcpu_setup_model(vcpu);
3166
David Hildenbrandbdab09f2016-04-12 11:07:49 +02003167 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3168 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003169 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01003170 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003171 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02003172 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003173 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003174
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003175 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003176 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02003177 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003178 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3179 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02003180 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003181 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02003182 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003183 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003184 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003185 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003186 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003187 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01003188 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003189 vcpu->arch.sie_block->eca |= ECA_VX;
3190 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04003191 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003192 if (test_kvm_facility(vcpu->kvm, 139))
3193 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003194 if (test_kvm_facility(vcpu->kvm, 156))
3195 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02003196 if (vcpu->arch.sie_block->gd) {
3197 vcpu->arch.sie_block->eca |= ECA_AIV;
3198 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3199 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3200 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003201 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3202 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08003203 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05003204
3205 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003206 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05003207 else
3208 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05003209
Dominik Dingele6db1d62015-05-07 15:41:57 +02003210 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01003211 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3212 if (rc)
3213 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003214 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01003215 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02003216 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01003217
Collin Walling67d49d52018-08-31 12:51:19 -04003218 vcpu->arch.sie_block->hpid = HPID_KVM;
3219
Tony Krowiak5102ee82014-06-27 14:46:01 -04003220 kvm_s390_vcpu_crypto_setup(vcpu);
3221
Janosch Frank29b40f12019-09-30 04:19:18 -04003222 mutex_lock(&vcpu->kvm->lock);
3223 if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3224 rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3225 if (rc)
3226 kvm_s390_vcpu_unsetup_cmma(vcpu);
3227 }
3228 mutex_unlock(&vcpu->kvm->lock);
3229
Dominik Dingelb31605c2014-03-25 13:47:11 +01003230 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003231}
3232
Sean Christopherson897cc382019-12-18 13:55:09 -08003233int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3234{
3235 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3236 return -EINVAL;
3237 return 0;
3238}
3239
Sean Christophersone529ef62019-12-18 13:55:15 -08003240int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003241{
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003242 struct sie_page *sie_page;
Sean Christopherson897cc382019-12-18 13:55:09 -08003243 int rc;
Carsten Otte4d475552011-10-18 12:27:12 +02003244
QingFeng Haoda72ca42017-06-07 11:41:19 +02003245 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003246 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
3247 if (!sie_page)
Sean Christophersone529ef62019-12-18 13:55:15 -08003248 return -ENOMEM;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003249
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003250 vcpu->arch.sie_block = &sie_page->sie_block;
3251 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3252
David Hildenbrandefed1102015-04-16 12:32:41 +02003253 /* the real guest size will always be smaller than msl */
3254 vcpu->arch.sie_block->mso = 0;
3255 vcpu->arch.sie_block->msl = sclp.hamax;
3256
Sean Christophersone529ef62019-12-18 13:55:15 -08003257 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003258 spin_lock_init(&vcpu->arch.local_int.lock);
Sean Christophersone529ef62019-12-18 13:55:15 -08003259 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02003260 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3261 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003262 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01003263
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003264 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3265 kvm_clear_async_pf_completion_queue(vcpu);
3266 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3267 KVM_SYNC_GPRS |
3268 KVM_SYNC_ACRS |
3269 KVM_SYNC_CRS |
3270 KVM_SYNC_ARCH0 |
Collin Walling23a60f82020-06-22 11:46:36 -04003271 KVM_SYNC_PFAULT |
3272 KVM_SYNC_DIAG318;
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003273 kvm_s390_set_prefix(vcpu, 0);
3274 if (test_kvm_facility(vcpu->kvm, 64))
3275 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3276 if (test_kvm_facility(vcpu->kvm, 82))
3277 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3278 if (test_kvm_facility(vcpu->kvm, 133))
3279 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3280 if (test_kvm_facility(vcpu->kvm, 156))
3281 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3282 /* fprs can be synchronized via vrs, even if the guest has no vx. With
3283 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3284 */
3285 if (MACHINE_HAS_VX)
3286 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3287 else
3288 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3289
3290 if (kvm_is_ucontrol(vcpu->kvm)) {
3291 rc = __kvm_ucontrol_vcpu_init(vcpu);
3292 if (rc)
Sean Christophersona2017f12019-12-18 13:55:11 -08003293 goto out_free_sie_block;
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003294 }
3295
Sean Christophersone529ef62019-12-18 13:55:15 -08003296 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3297 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3298 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003299
Sean Christophersonff72bb52019-12-18 13:55:20 -08003300 rc = kvm_s390_vcpu_setup(vcpu);
3301 if (rc)
3302 goto out_ucontrol_uninit;
Sean Christophersone529ef62019-12-18 13:55:15 -08003303 return 0;
3304
Sean Christophersonff72bb52019-12-18 13:55:20 -08003305out_ucontrol_uninit:
3306 if (kvm_is_ucontrol(vcpu->kvm))
3307 gmap_remove(vcpu->arch.gmap);
Wei Yongjun7b06bf22010-03-09 14:37:53 +08003308out_free_sie_block:
3309 free_page((unsigned long)(vcpu->arch.sie_block));
Sean Christophersone529ef62019-12-18 13:55:15 -08003310 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003311}
3312
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003313int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3314{
Halil Pasice11a7352021-10-19 19:53:59 +02003315 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
David Hildenbrand9a022062014-08-05 17:40:47 +02003316 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003317}
3318
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003319bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3320{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003321 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003322}
3323
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003324void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003325{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003326 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003327 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003328}
3329
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003330void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003331{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003332 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003333}
3334
Christian Borntraeger8e236542015-04-09 13:49:04 +02003335static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3336{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003337 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003338 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003339}
3340
David Hildenbrand9ea59722018-09-25 19:16:16 -04003341bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3342{
3343 return atomic_read(&vcpu->arch.sie_block->prog20) &
3344 (PROG_BLOCK_SIE | PROG_REQUEST);
3345}
3346
Christian Borntraeger8e236542015-04-09 13:49:04 +02003347static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3348{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003349 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003350}
3351
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003352/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003353 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003354 * If the CPU is not running (e.g. waiting as idle), the function will
3355 * return immediately. */
3356void exit_sie(struct kvm_vcpu *vcpu)
3357{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003358 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003359 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003360 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3361 cpu_relax();
3362}
3363
Christian Borntraeger8e236542015-04-09 13:49:04 +02003364/* Kick a guest cpu out of SIE to process a request synchronously */
3365void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003366{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003367 kvm_make_request(req, vcpu);
3368 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003369}
3370
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003371static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3372 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003373{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003374 struct kvm *kvm = gmap->private;
3375 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003376 unsigned long prefix;
3377 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003378
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02003379 if (gmap_is_shadow(gmap))
3380 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003381 if (start >= 1UL << 31)
3382 /* We are only interested in prefix pages */
3383 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003384 kvm_for_each_vcpu(i, vcpu, kvm) {
3385 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003386 prefix = kvm_s390_get_prefix(vcpu);
3387 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3388 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3389 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003390 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003391 }
3392 }
3393}
3394
Christian Borntraeger8b905d22019-03-05 05:30:02 -05003395bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3396{
3397 /* do not poll with more than halt_poll_max_steal percent of steal time */
3398 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3399 halt_poll_max_steal) {
3400 vcpu->stat.halt_no_poll_steal++;
3401 return true;
3402 }
3403 return false;
3404}
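/*
 * Worked form of the check above, assuming avg_steal_timer is kept in
 * TOD-clock units (4096 units per microsecond): TICK_USEC << 12 is one
 * timer tick converted to TOD units, so the condition is simply
 *
 *	avg_steal_per_tick * 100 / tick_length >= halt_poll_max_steal
 *
 * i.e. polling is skipped once the average steal share of a tick reaches
 * the configured percentage.
 */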
3405
Christoffer Dallb6d33832012-03-08 16:44:24 -05003406int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3407{
3408 /* kvm common code refers to this, but never calls it */
3409 BUG();
3410 return 0;
3411}
3412
Carsten Otte14eebd92012-05-15 14:15:26 +02003413static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3414 struct kvm_one_reg *reg)
3415{
3416 int r = -EINVAL;
3417
3418 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003419 case KVM_REG_S390_TODPR:
3420 r = put_user(vcpu->arch.sie_block->todpr,
3421 (u32 __user *)reg->addr);
3422 break;
3423 case KVM_REG_S390_EPOCHDIFF:
3424 r = put_user(vcpu->arch.sie_block->epoch,
3425 (u64 __user *)reg->addr);
3426 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003427 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003428 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02003429 (u64 __user *)reg->addr);
3430 break;
3431 case KVM_REG_S390_CLOCK_COMP:
3432 r = put_user(vcpu->arch.sie_block->ckc,
3433 (u64 __user *)reg->addr);
3434 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003435 case KVM_REG_S390_PFTOKEN:
3436 r = put_user(vcpu->arch.pfault_token,
3437 (u64 __user *)reg->addr);
3438 break;
3439 case KVM_REG_S390_PFCOMPARE:
3440 r = put_user(vcpu->arch.pfault_compare,
3441 (u64 __user *)reg->addr);
3442 break;
3443 case KVM_REG_S390_PFSELECT:
3444 r = put_user(vcpu->arch.pfault_select,
3445 (u64 __user *)reg->addr);
3446 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003447 case KVM_REG_S390_PP:
3448 r = put_user(vcpu->arch.sie_block->pp,
3449 (u64 __user *)reg->addr);
3450 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003451 case KVM_REG_S390_GBEA:
3452 r = put_user(vcpu->arch.sie_block->gbea,
3453 (u64 __user *)reg->addr);
3454 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003455 default:
3456 break;
3457 }
3458
3459 return r;
3460}
3461
3462static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3463 struct kvm_one_reg *reg)
3464{
3465 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01003466 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02003467
3468 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003469 case KVM_REG_S390_TODPR:
3470 r = get_user(vcpu->arch.sie_block->todpr,
3471 (u32 __user *)reg->addr);
3472 break;
3473 case KVM_REG_S390_EPOCHDIFF:
3474 r = get_user(vcpu->arch.sie_block->epoch,
3475 (u64 __user *)reg->addr);
3476 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003477 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003478 r = get_user(val, (u64 __user *)reg->addr);
3479 if (!r)
3480 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02003481 break;
3482 case KVM_REG_S390_CLOCK_COMP:
3483 r = get_user(vcpu->arch.sie_block->ckc,
3484 (u64 __user *)reg->addr);
3485 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003486 case KVM_REG_S390_PFTOKEN:
3487 r = get_user(vcpu->arch.pfault_token,
3488 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003489 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3490 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02003491 break;
3492 case KVM_REG_S390_PFCOMPARE:
3493 r = get_user(vcpu->arch.pfault_compare,
3494 (u64 __user *)reg->addr);
3495 break;
3496 case KVM_REG_S390_PFSELECT:
3497 r = get_user(vcpu->arch.pfault_select,
3498 (u64 __user *)reg->addr);
3499 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003500 case KVM_REG_S390_PP:
3501 r = get_user(vcpu->arch.sie_block->pp,
3502 (u64 __user *)reg->addr);
3503 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003504 case KVM_REG_S390_GBEA:
3505 r = get_user(vcpu->arch.sie_block->gbea,
3506 (u64 __user *)reg->addr);
3507 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003508 default:
3509 break;
3510 }
3511
3512 return r;
3513}
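/*
 * Userspace sketch, for illustration (assumes an open vcpu fd): the two
 * accessors above back the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG
 * interface, e.g. for the CPU timer:
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
 *		err(1, "KVM_GET_ONE_REG");
 */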
Christoffer Dallb6d33832012-03-08 16:44:24 -05003514
Janosch Frank7de3f142020-01-31 05:02:02 -05003515static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003516{
Janosch Frank7de3f142020-01-31 05:02:02 -05003517 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
3518 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3519 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
3520
3521 kvm_clear_async_pf_completion_queue(vcpu);
3522 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
3523 kvm_s390_vcpu_stop(vcpu);
3524 kvm_s390_clear_local_irqs(vcpu);
3525}
3526
3527static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3528{
3529 /* Initial reset is a superset of the normal reset */
3530 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
3531
Christian Borntraegere93fc7b2020-03-03 03:10:57 -05003532 /*
3533 * This equals the initial CPU reset described in the PoP, but we don't
3534 * switch to ESA. We reset not only the internal data, but also ...
3535 */
Janosch Frank7de3f142020-01-31 05:02:02 -05003536 vcpu->arch.sie_block->gpsw.mask = 0;
3537 vcpu->arch.sie_block->gpsw.addr = 0;
3538 kvm_s390_set_prefix(vcpu, 0);
3539 kvm_s390_set_cpu_timer(vcpu, 0);
3540 vcpu->arch.sie_block->ckc = 0;
Janosch Frank7de3f142020-01-31 05:02:02 -05003541 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
3542 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
3543 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
Christian Borntraegere93fc7b2020-03-03 03:10:57 -05003544
3545 /* ... the data in sync regs */
3546 memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
3547 vcpu->run->s.regs.ckc = 0;
3548 vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
3549 vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
3550 vcpu->run->psw_addr = 0;
3551 vcpu->run->psw_mask = 0;
3552 vcpu->run->s.regs.todpr = 0;
3553 vcpu->run->s.regs.cputm = 0;
3554 vcpu->run->s.regs.ckc = 0;
3555 vcpu->run->s.regs.pp = 0;
3556 vcpu->run->s.regs.gbea = 1;
Janosch Frank7de3f142020-01-31 05:02:02 -05003557 vcpu->run->s.regs.fpc = 0;
Janosch Frank0f303502020-02-10 04:27:47 -05003558 /*
3559 * Do not reset these registers in the protected case, as some of
3560 * them are overlaid and they are not accessible in this case
3561 * anyway.
3562 */
3563 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
3564 vcpu->arch.sie_block->gbea = 1;
3565 vcpu->arch.sie_block->pp = 0;
3566 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3567 vcpu->arch.sie_block->todpr = 0;
3568 }
Janosch Frank7de3f142020-01-31 05:02:02 -05003569}
3570
3571static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
3572{
3573 struct kvm_sync_regs *regs = &vcpu->run->s.regs;
3574
3575 /* Clear reset is a superset of the initial reset */
3576 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3577
3578 memset(&regs->gprs, 0, sizeof(regs->gprs));
3579 memset(&regs->vrs, 0, sizeof(regs->vrs));
3580 memset(&regs->acrs, 0, sizeof(regs->acrs));
3581 memset(&regs->gscb, 0, sizeof(regs->gscb));
3582
3583 regs->etoken = 0;
3584 regs->etoken_extension = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003585}
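/*
 * Userspace sketch, for illustration (vcpu fd assumed): the three helpers
 * above back the vcpu reset ioctls, each a superset of the previous one:
 *
 *	ioctl(vcpu_fd, KVM_S390_NORMAL_RESET, 0);
 *	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);	// normal reset plus arch state
 *	ioctl(vcpu_fd, KVM_S390_CLEAR_RESET, 0);	// initial reset plus gprs/vrs/acrs/gscb
 */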
3586
3587int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3588{
Christoffer Dall875656f2017-12-04 21:35:27 +01003589 vcpu_load(vcpu);
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);

	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	int ret = 0;

	vcpu_load(vcpu);

	if (test_fp_ctl(fpu->fpc)) {
		ret = -EINVAL;
		goto out;
	}
	vcpu->run->s.regs.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
				 (freg_t *) fpu->fprs);
	else
		memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));

out:
	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);

	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *) fpu->fprs,
				 (__vector128 *) vcpu->run->s.regs.vrs);
	else
		memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->run->s.regs.fpc;

	vcpu_put(vcpu);
	return 0;
}

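/*
 * Set the initial PSW of the VCPU. This is refused with -EBUSY unless
 * the VCPU is currently stopped.
 */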
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu_load(vcpu);

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
		rc = -EINVAL;
		goto out;
	}
	if (!sclp.has_gpere) {
		rc = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
	}

out:
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret;

	vcpu_load(vcpu);

	/* CHECK_STOP and LOAD are not supported yet */
	ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				      KVM_MP_STATE_OPERATING;

	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	vcpu_load(vcpu);

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		rc = kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		rc = kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
			rc = -ENXIO;
			break;
		}
		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
		break;
	case KVM_MP_STATE_CHECK_STOP:
		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	vcpu_put(vcpu);
	return rc;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
}

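/*
 * Process all pending requests for this VCPU before entering SIE.
 * Handling one request may raise another one, so the request loop is
 * retried from the top after each handled request.
 */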
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!kvm_request_pending(vcpu))
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc) {
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			return rc;
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
		/*
		 * Disable CMM virtualization; we will emulate the ESSA
		 * instruction manually, in order to provide additional
		 * functionalities needed for live migration.
		 */
		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
		/*
		 * Re-enable CMM virtualization if CMMA is available and
		 * CMM has been used.
		 */
		if ((vcpu->kvm->arch.use_cmma) &&
		    (vcpu->kvm->mm->context.uses_cmm))
			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		goto retry;
	}

	/* nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	/* we left the vsie handler, nothing to do, just clear the request */
	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);

	return 0;
}

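/*
 * Set the guest TOD clock for the whole VM. With the multi-epoch
 * facility (139) the epoch index is adjusted as well. All VCPUs are
 * blocked while the new epoch values are installed so that they pick
 * up a consistent view on their next SIE entry.
 */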
void kvm_s390_set_tod_clock(struct kvm *kvm,
			    const struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_vcpu *vcpu;
	struct kvm_s390_tod_clock_ext htod;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	kvm->arch.epoch = gtod->tod - htod.tod;
	kvm->arch.epdx = 0;
	if (test_kvm_facility(kvm, 139)) {
		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
		if (kvm->arch.epoch > gtod->tod)
			kvm->arch.epdx -= 1;
	}

	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		vcpu->arch.sie_block->epdx = kvm->arch.epdx;
	}

	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);

	return true;
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
	return true;
}

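/*
 * Check whether the current host fault may be handled via the pfault
 * handshake: the guest must have pfault enabled and be able to take
 * the PFAULT INIT interrupt right now. Returns true if an async pfault
 * was set up, false if the fault has to be resolved synchronously.
 */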
static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return false;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return false;
	if (psw_extint_disabled(vcpu))
		return false;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return false;
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		return false;
	if (!vcpu->arch.gmap->pfault_enabled)
		return false;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return false;

	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
}

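/*
 * Prepare a single round of SIE execution: finish pfault housekeeping,
 * deliver pending interrupts, process pending requests and arm guest
 * debugging. A non-zero return value aborts the run loop.
 */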
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

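/*
 * Post-process a single SIE exit: re-inject a machine check that
 * interrupted SIE, let the intercept handlers run and translate the
 * result into a return code: 0 keeps the run loop going, -EREMOTE
 * drops to userspace, other negative values signal an error.
 */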
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	struct mcck_volatile_info *mcck_info;
	struct sie_page *sie_page;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (exit_reason == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		sie_page = container_of(vcpu->arch.sie_block,
					struct sie_page, sie_block);
		mcck_info = &sie_page->mcck_info;
		kvm_s390_reinject_machine_check(vcpu, mcck_info);
		return 0;
	}

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

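/*
 * The main run loop: alternate between vcpu_pre_run(), the SIE entry
 * via sie64a() and vcpu_post_run() until a signal arrives, a guest
 * debug exit is pending or one of the hooks reports an error. For
 * protected guests the general purpose registers are staged through
 * sie_page->pv_grregs around each SIE entry.
 */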
#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;
	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there should
		 * be no uaccess between guest_enter and guest_exit.
		 */
		local_irq_disable();
		guest_enter_irqoff();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(sie_page->pv_grregs,
			       vcpu->run->s.regs.gprs,
			       sizeof(sie_page->pv_grregs));
		}
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			memcpy(vcpu->run->s.regs.gprs,
			       sie_page->pv_grregs,
			       sizeof(sie_page->pv_grregs));
			/*
			 * We're not allowed to inject interrupts on intercepts
			 * that leave the guest state in an "in-between" state
			 * where the next SIE entry will do a continuation.
			 * Fence interrupts in our "internal" PSW.
			 */
			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
			}
		}
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		guest_exit_irqoff();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

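/*
 * Sync the format-2 (non-protected) part of the register state from
 * kvm_run into the SIE control block. This also covers the lazy
 * enablement of runtime instrumentation and guarded storage when
 * userspace hands in a valid riccb/gscb, e.g. after migration.
 */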
static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	struct runtime_instr_cb *riccb;
	struct gs_cb *gscb;

	riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
	}
	/*
	 * If userspace sets the riccb (e.g. after migration) to a valid state,
	 * we should enable RI here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
	    test_kvm_facility(vcpu->kvm, 64) &&
	    riccb->v &&
	    !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
	}
	/*
	 * If userspace sets the gscb (e.g. after migration) to non-zero,
	 * we should enable GS here instead of doing the lazy enablement.
	 */
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
	    test_kvm_facility(vcpu->kvm, 133) &&
	    gscb->gssm &&
	    !vcpu->arch.gs_enabled) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
	}
	if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
	    test_kvm_facility(vcpu->kvm, 82)) {
		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
	}
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (current->thread.gs_cb) {
			vcpu->arch.host_gscb = current->thread.gs_cb;
			save_gs_cb(vcpu->arch.host_gscb);
		}
		if (vcpu->arch.gs_enabled) {
			current->thread.gs_cb = (struct gs_cb *)
						&vcpu->run->s.regs.gscb;
			restore_gs_cb(current->thread.gs_cb);
		}
		preempt_enable();
	}
	/* SIE will load etoken directly from SDNX and therefore kvm_run */
}

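/*
 * Load the register state that userspace marked dirty in kvm_run into
 * the VCPU before entering SIE. For protected guests only a restricted
 * subset is taken over; e.g. only the condition code of the PSW is
 * accepted from userspace.
 */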
static void sync_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
	}
	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	/* save host (userspace) fprs/vrs */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	/* Sync fmt2 only data */
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
		sync_regs_fmt2(vcpu);
	} else {
		/*
		 * In several places we have to modify our internal view to
		 * not do things that are disallowed by the ultravisor. For
		 * example we must not inject interrupts after specific exits
		 * (e.g. 112 prefix page not secure). We do this by turning
		 * off the machine check, external and I/O interrupt bits
		 * of our PSW copy. To avoid getting validity intercepts, we
		 * do only accept the condition code from userspace.
		 */
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
						   PSW_MASK_CC;
	}

	kvm_run->kvm_dirty_regs = 0;
}

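/*
 * Counterpart of sync_regs_fmt2(): copy the format-2 only state back
 * from the SIE control block into kvm_run and restore the host view
 * of the guarded storage control block.
 */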
static void store_regs_fmt2(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
	if (MACHINE_HAS_GS) {
		preempt_disable();
		__ctl_set_bit(2, 4);
		if (vcpu->arch.gs_enabled)
			save_gs_cb(current->thread.gs_cb);
		current->thread.gs_cb = vcpu->arch.host_gscb;
		restore_gs_cb(vcpu->arch.host_gscb);
		if (!vcpu->arch.host_gscb)
			__ctl_clear_bit(2, 4);
		vcpu->arch.host_gscb = NULL;
		preempt_enable();
	}
	/* SIE will save etoken directly into SDNX and therefore kvm_run */
}

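/*
 * Copy the current VCPU state back into kvm_run after leaving SIE and
 * switch the access and floating point registers back to the host
 * (userspace) values.
 */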
static void store_regs(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	/* Restore will be done lazily at return */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
		store_regs_fmt2(vcpu);
}

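/*
 * The KVM_RUN ioctl: validate the requested register sync, start the
 * VCPU unless userspace controls the CPU state itself, run the SIE
 * loop and store the resulting state back for userspace.
 */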
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *kvm_run = vcpu->run;
	int rc;

	if (kvm_run->immediate_exit)
		return -EINTR;

	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
		return -EINVAL;

	vcpu_load(vcpu);

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	/*
	 * no need to check the return value of vcpu_start as it can only
	 * fail for protvirt, but protvirt means user controlled cpu state
	 * and thus kvm_s390_vcpu_start() is never reached here
	 */
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		rc = -EINVAL;
		goto out;
	}

	sync_regs(vcpu);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu);

	kvm_sigset_deactivate(vcpu);

	vcpu->stat.exit_userspace++;
out:
	vcpu_put(vcpu);
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * switch in the run ioctl. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

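/*
 * IBS is a performance optimization that may only be in effect while
 * exactly one VCPU is started: it gets enabled for a sole running VCPU
 * and has to be disabled on all VCPUs as soon as a second one is
 * started (see kvm_s390_vcpu_start/stop below).
 */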
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

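/*
 * Move a VCPU from the STOPPED to the OPERATING state. For protected
 * guests the state change must additionally be requested from the
 * ultravisor.
 */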
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the operating state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
	/*
	 * The real PSW might have changed due to a RESTART interpreted by the
	 * ultravisor. We block all interrupts and let the next sie exit
	 * refresh our view.
	 */
	if (kvm_s390_pv_cpu_is_protected(vcpu))
		vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}

int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, r = 0, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return 0;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Let's tell the UV that we want to change into the stopped state */
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
		if (r) {
			spin_unlock(&vcpu->kvm->arch.start_stop_lock);
			return r;
		}
	}

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

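/*
 * Handle the SIDA flavours of KVM_S390_MEM_OP, which give userspace
 * access to the SIDA of a protected VCPU. The sida_offset checks also
 * guard against an offset + size wrap-around.
 */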
static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
				   struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	int r = 0;

	if (mop->flags || !mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset < mop->size)
		return -EINVAL;
	if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
		return -E2BIG;

	switch (mop->op) {
	case KVM_S390_MEMOP_SIDA_READ:
		if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
				 mop->sida_offset), mop->size))
			r = -EFAULT;

		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
				   mop->sida_offset), uaddr, mop->size))
			r = -EFAULT;
		break;
	}
	return r;
}
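
/*
 * Handle the logical read/write flavours of KVM_S390_MEM_OP. Accesses
 * go through a temporary bounce buffer; with the CHECK_ONLY flag only
 * the accessibility of the guest address range is tested.
 */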
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r = 0;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (kvm_s390_pv_cpu_is_protected(vcpu))
		return -EINVAL;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	}

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu,
				      struct kvm_s390_mem_op *mop)
{
	int r, srcu_idx;

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		r = kvm_s390_guest_mem_op(vcpu, mop);
		break;
	case KVM_S390_MEMOP_SIDA_READ:
	case KVM_S390_MEMOP_SIDA_WRITE:
		/* we are locked against sida going away by the vcpu->mutex */
		r = kvm_s390_guest_sida_op(vcpu, mop);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	return r;
}

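/*
 * Interrupt injection is handled in the async ioctl path, i.e. without
 * taking the vcpu mutex first, so that interrupts can be injected into
 * a VCPU that is currently running.
 */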
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq = {};

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		return kvm_s390_inject_vcpu(vcpu, &s390irq);
	}
	}
	return -ENOIOCTLCMD;
}

4779long kvm_arch_vcpu_ioctl(struct file *filp,
4780 unsigned int ioctl, unsigned long arg)
4781{
4782 struct kvm_vcpu *vcpu = filp->private_data;
4783 void __user *argp = (void __user *)arg;
4784 int idx;
4785 long r;
Janosch Frank8a8378f2020-01-09 04:37:50 -05004786 u16 rc, rrc;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004787
4788 vcpu_load(vcpu);
4789
4790 switch (ioctl) {
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004791 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02004792 idx = srcu_read_lock(&vcpu->kvm->srcu);
Christian Borntraeger55680892020-01-31 05:02:00 -05004793 r = kvm_s390_store_status_unloaded(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02004794 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004795 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004796 case KVM_S390_SET_INITIAL_PSW: {
4797 psw_t psw;
4798
Avi Kivitybc923cc2010-05-13 12:21:46 +03004799 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004800 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03004801 break;
4802 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
4803 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004804 }
	case KVM_S390_CLEAR_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_clear_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_INITIAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET_INITIAL,
					  &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_S390_NORMAL_RESET:
		r = 0;
		kvm_arch_vcpu_ioctl_normal_reset(vcpu);
		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
			r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
					  UVC_CMD_CPU_RESET, &rc, &rrc);
			VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
				   rc, rrc);
		}
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EINVAL;
		if (kvm_s390_pv_cpu_is_protected(vcpu))
			break;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
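	/*
	 * User-controlled (ucontrol) VMs manage the guest address space
	 * from userspace: the two UCAS ioctls map and unmap 1 MB segments
	 * of user memory in the vcpu's guest mapping.
	 */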
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
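	/*
	 * Resolve a host fault for the given guest address, so that the
	 * backing page is mapped before the vcpu touches it.
	 */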
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
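	/*
	 * KVM_S390_MEM_OP reads or writes guest memory (or, for protected
	 * guests, the SIDA) through the vcpu. A minimal userspace sketch,
	 * assuming an already-created vcpu fd and a local buffer
	 * (illustrative only, not part of this file):
	 *
	 *	struct kvm_s390_mem_op op = {
	 *		.gaddr = guest_addr,
	 *		.size  = sizeof(buffer),
	 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
	 *		.buf   = (__u64)(uintptr_t)buffer,
	 *		.ar    = 0,
	 *	};
	 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
	 *		perror("KVM_S390_MEM_OP");
	 */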
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_memsida_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
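	/*
	 * The IRQ state ioctls save and restore the vcpu's pending
	 * interrupts as a flat array of struct kvm_s390_irq, e.g. for
	 * live migration.
	 */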
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		/* do not use irq_state.flags, it will break old QEMUs */
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}

	vcpu_put(vcpu);
	return r;
}

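/*
 * mmap() fault handler for a vcpu fd: for ucontrol VMs, page offset
 * KVM_S390_SIE_PAGE_OFFSET exposes the SIE control block to userspace;
 * all other accesses get SIGBUS.
 */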
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots have to start and end on a
	 * segment boundary (1 MB). The backing memory in userspace may be
	 * fragmented across multiple vmas, and it is fine to mmap() and
	 * munmap() within the slot at any time after this call.
	 */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	/* When we are protected, we should not change the memory slots */
	if (kvm_s390_pv_get_handle(kvm))
		return -EINVAL;
	return 0;
}

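/*
 * The checks above mean userspace must hand in 1 MB aligned slots. A
 * sketch of a conforming KVM_SET_USER_MEMORY_REGION call (illustrative
 * only; names like "vm_fd" and "backing" are assumptions):
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = 256UL << 20,		// multiple of 1 MB
 *		.userspace_addr = (__u64)backing,	// 1 MB aligned
 *	};
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
 *		perror("KVM_SET_USER_MEMORY_REGION");
 */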
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int rc = 0;

	switch (change) {
	case KVM_MR_DELETE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		break;
	case KVM_MR_MOVE:
		rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
					old->npages * PAGE_SIZE);
		if (rc)
			break;
		fallthrough;
	case KVM_MR_CREATE:
		rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
				      mem->guest_phys_addr, mem->memory_size);
		break;
	case KVM_MR_FLAGS_ONLY:
		break;
	default:
		WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
	}
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

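/*
 * Extract the 2-bit field for facility doubleword i from sclp.hmfai and
 * widen it into a mask of the facility bits in that doubleword that are
 * not hypervisor-managed, i.e. that may be forwarded to guests as-is.
 */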
static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

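/*
 * Module init: refuse to load without the SIE interpretation facility,
 * reject the unsupported combination of nested SIE and huge-page
 * backing, then seed the base facility list from the host's STFLE data
 * (masked to the non-hypervisor-managed bits) before registering with
 * the generic KVM core.
 */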
static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE is not available\n");
		return -ENODEV;
	}

	if (nested && hpage) {
		pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_base[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");