// SPDX-License-Identifier: GPL-2.0
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

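/*
 * Each entry below pairs a debugfs file name with the offset of a counter
 * in struct kvm_vcpu (stat.x); the generic KVM debugfs code uses the offset
 * to locate and expose the counter, typically under /sys/kernel/debug/kvm/
 * (the exact debugfs path is stated here for illustration, not taken from
 * this file).
 */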
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

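/*
 * Layout of the extended TOD clock value as read by get_tod_clock_ext():
 * with the multiple-epoch facility, byte 0 carries the epoch index that
 * extends the 64-bit TOD value (mapping to the STORE CLOCK EXTENDED output
 * format is an assumption stated here for illustration).
 */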
struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

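/*
 * Illustrative usage (not from the original source): nested virtualization
 * stays off unless the module is loaded with e.g. "modprobe kvm nested=1".
 * The flag is only consulted in kvm_s390_cpu_feat_init() at module init,
 * and S_IRUGO makes it read-only afterwards.
 */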
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

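/*
 * Query a single PERFORM LOCKED OPERATION (PLO) function code. Setting bit
 * 0x100 in general register 0 turns PLO into a "test bit" query for
 * function code @nr; condition code 0 then means the function is installed.
 */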
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

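/*
 * Probe what the host provides, once at module load: PLO subfunctions 0-255
 * (packed MSB-first, so subfunction 0 lands in bit 0x80 of byte 0), the
 * PTFF and CPACF query masks for the installed MSA levels, and the
 * SCLP/STFLE-gated SIE features that may be passed through to nested
 * (vSIE) guests.
 */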
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	default:
		r = 0;
	}
	return r;
}

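/*
 * Transfer dirtiness from the host page tables into the KVM dirty bitmap:
 * walk every gfn of the slot, and if the gmap's guest dirty bit for the
 * backing host page is set, clear it and mark the gfn dirty. The walk can
 * be long, so it bails out on fatal signals and offers to reschedule.
 */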
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

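/*
 * VM capabilities that map to CPU-model facilities follow a common pattern:
 * they may only be enabled before the first VCPU exists, and they set the
 * facility bit in both fac_mask (what the host allows) and fac_list (what
 * the guest sees). A sketch of the recurring shape, with "nr" standing in
 * for the facility number of the respective capability:
 *
 *	mutex_lock(&kvm->lock);
 *	if (kvm->created_vcpus)
 *		r = -EBUSY;
 *	else if (test_facility(nr)) {
 *		set_kvm_facility(kvm->arch.model.fac_mask, nr);
 *		set_kvm_facility(kvm->arch.model.fac_list, nr);
 *		r = 0;
 *	}
 *	mutex_unlock(&kvm->lock);
 */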
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

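/*
 * Key wrapping attributes require the MSA3 facility (76). Enabling AES/DEA
 * key wrapping generates a fresh random wrapping key mask in the CRYCB;
 * disabling clears it. Every VCPU is then re-set-up and kicked out of SIE
 * so the new crypto control block contents take effect immediately.
 */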
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;
	struct kvm_memory_slot *ms;
	/* should be the only one */
	struct kvm_memslots *slots;
	unsigned long ram_pages;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_state)
		return 0;

	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
	if (!mgs)
		return -ENOMEM;
	kvm->arch.migration_state = mgs;

	if (kvm->arch.use_cmma) {
		/*
		 * Get the last slot. They should be sorted by base_gfn, so the
		 * last slot is also the one at the end of the address space.
		 * We have verified above that at least one slot is present.
		 */
		ms = slots->memslots + slots->used_slots - 1;
		/* round up so we only use full longs */
		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
		/* allocate enough bytes to store all the bits */
		mgs->pgste_bitmap = vmalloc(ram_pages / 8);
		if (!mgs->pgste_bitmap) {
			kfree(mgs);
			kvm->arch.migration_state = NULL;
			return -ENOMEM;
		}

		mgs->bitmap_size = ram_pages;
		atomic64_set(&mgs->dirty_pages, ram_pages);
		/* mark all the pages in active slots as dirty */
		for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
			ms = slots->memslots + slotnr;
			bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
		}

		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	}
	return 0;
}

/*
 * Must be called with kvm->lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;

	/* migration mode already disabled */
	if (!kvm->arch.migration_state)
		return 0;
	mgs = kvm->arch.migration_state;
	kvm->arch.migration_state = NULL;

	if (kvm->arch.use_cmma) {
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
		vfree(mgs->pgste_bitmap);
	}
	kfree(mgs);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int idx, res = -ENXIO;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		idx = srcu_read_lock(&kvm->srcu);
		res = kvm_s390_vm_start_migration(kvm);
		srcu_read_unlock(&kvm->srcu, idx);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = (kvm->arch.migration_state != NULL);

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

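/*
 * With the multiple-epoch facility (139) the guest TOD clock is the pair
 * (epoch_idx, tod); without it, only requests with epoch_idx == 0 can be
 * honored. Userspace passes a struct kvm_s390_vm_tod_clock through the
 * KVM_S390_VM_TOD_EXT attribute.
 */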
static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (test_kvm_facility(kvm, 139))
		kvm_s390_set_tod_clock_ext(kvm, &gtod);
	else if (gtod.epoch_idx == 0)
		kvm_s390_set_tod_clock(kvm, gtod.tod);
	else
		return -EINVAL;

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

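/*
 * Compute the guest's extended TOD as host TOD plus the per-VM epoch.
 * The 64-bit addition may wrap; a wrapped result shows up as
 * gtod->tod < htod.tod and is carried into the epoch index. Worked
 * example (illustrative values): htod.tod = 0xfffffffffffffffe and
 * epoch = 4 give gtod->tod = 2, so epoch_idx is incremented by one.
 */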
static void kvm_s390_get_tod_clock_ext(struct kvm *kvm,
				       struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;

	if (gtod->tod < htod.tod)
		gtod->epoch_idx += 1;

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));

	if (test_kvm_facility(kvm, 139))
		kvm_s390_get_tod_clock_ext(kvm, &gtod);
	else
		gtod.tod = kvm_s390_get_tod_clock_fast(kvm);

	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

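/*
 * The requested instruction blocking control (IBC) is clamped against the
 * machine's range reported by SCLP: values above the unblocked IBC fall
 * back to the highest unblocked level, values below the lowest supported
 * level are raised to it, and anything in between is taken as-is.
 */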
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

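/*
 * KVM_HAS_DEVICE_ATTR probe: report 0 for every attribute that can be set
 * or queried on this host, -ENXIO otherwise, without touching any state.
 */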
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

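/*
 * KVM_S390_GET_SKEYS: copy the guest storage keys for a range of guest
 * frames to user space. Returns KVM_S390_GET_SKEYS_NONE if the guest has
 * never used storage keys, so user space can skip the transfer entirely.
 */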
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

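/*
 * KVM_S390_SET_SKEYS: read a key array from user space and apply it to the
 * guest, enabling storage key handling for the mm on first use.
 */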
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	struct kvm_s390_migration_state *s = kvm->arch.migration_state;
	unsigned long bufsize, hva, pgstev, i, next, cur;
	int srcu_idx, peek, r = 0, rr;
	u8 *res;

	cur = args->start_gfn;
	i = next = pgstev = 0;

	if (unlikely(!kvm->arch.use_cmma))
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !s)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.use_cmma) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	if (!peek) {
		/* We are not peeking, and there are no dirty pages */
		if (!atomic64_read(&s->dirty_pages)) {
			memset(args, 0, sizeof(*args));
			return 0;
		}
		cur = find_next_bit(s->pgste_bitmap, s->bitmap_size,
				    args->start_gfn);
		if (cur >= s->bitmap_size)	/* nothing found, loop back */
			cur = find_next_bit(s->pgste_bitmap, s->bitmap_size, 0);
		if (cur >= s->bitmap_size) {	/* again! (very unlikely) */
			memset(args, 0, sizeof(*args));
			return 0;
		}
		next = find_next_bit(s->pgste_bitmap, s->bitmap_size, cur + 1);
	}

	res = vmalloc(bufsize);
	if (!res)
		return -ENOMEM;

	args->start_gfn = cur;

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < bufsize) {
		hva = gfn_to_hva(kvm, cur);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}
		/* decrement only if we actually flipped the bit to 0 */
		if (!peek && test_and_clear_bit(cur, s->pgste_bitmap))
			atomic64_dec(&s->dirty_pages);
		r = get_pgste(kvm->mm, hva, &pgstev);
		if (r < 0)
			pgstev = 0;
		/* save the value */
		res[i++] = (pgstev >> 24) & 0x43;
		/*
		 * if the next bit is too far away, stop.
		 * if we reached the previous "next", find the next one
		 */
		if (!peek) {
			if (next > cur + KVM_S390_MAX_BIT_DISTANCE)
				break;
			if (cur == next)
				next = find_next_bit(s->pgste_bitmap,
						     s->bitmap_size, cur + 1);
			/* reached the end of the bitmap or of the buffer, stop */
			if ((next >= s->bitmap_size) ||
			    (next >= args->start_gfn + bufsize))
				break;
		}
		cur++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);
	args->count = i;
	args->remaining = s ? atomic64_read(&s->dirty_pages) : 0;

	rr = copy_to_user((void __user *)args->values, res, args->count);
	if (rr)
		r = -EFAULT;

	vfree(res);
	return r;
}

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.use_cmma flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(sizeof(*bits) * args->count);
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (!kvm->mm->context.use_cmma) {
		down_write(&kvm->mm->mmap_sem);
		kvm->mm->context.use_cmma = 1;
		up_write(&kvm->mm->mmap_sem);
	}
out:
	vfree(bits);
	return r;
}

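/*
 * Entry point for the VM-scoped ioctls. A minimal user-space sketch of the
 * device attribute path (vm_fd stands for a hypothetical KVM VM file
 * descriptor obtained via KVM_CREATE_VM):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
 *		ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */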
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		r = kvm_s390_get_cmma_bits(kvm, &args);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		r = kvm_s390_set_cmma_bits(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

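/*
 * Execute PQAP(QCI) to retrieve the AP configuration into the 128 byte
 * buffer and return the condition code. The exception table entry lets
 * the call fail gracefully (cc stays 0, buffer stays zeroed) where the
 * instruction is not available.
 */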
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

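/* Use CRYCB format 2 if the APXA facility is installed, format 1 otherwise. */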
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

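/*
 * Set up the VM crypto control block if facility 76 (MSAX3) is available:
 * choose the CRYCB format and generate fresh AES/DEA wrapping key masks.
 */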
static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

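/*
 * Create the architecture specific parts of a VM: the SCA, the debug
 * feature, facility lists and crypto setup, the floating interrupt state
 * and, unless this is a ucontrol VM, the gmap for the guest address space.
 */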
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	/* we are always in czam mode - even on pre z14 machines */
	set_kvm_facility(kvm->arch.model.fac_mask, 138);
	set_kvm_facility(kvm->arch.model.fac_list, 138);
	/* we emulate STHYI in kvm */
	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);
	if (MACHINE_HAS_TLB_GUEST) {
		set_kvm_facility(kvm->arch.model.fac_mask, 147);
		set_kvm_facility(kvm->arch.model.fac_list, 147);
	}

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	mutex_init(&kvm->arch.float_int.ais_lock);
	kvm->arch.float_int.simm = 0;
	kvm->arch.float_int.nimm = 0;
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_SIZE_MAX;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	if (kvm->arch.migration_state) {
		vfree(kvm->arch.migration_state->pgste_bitmap);
		kfree(kvm->arch.migration_state);
	}
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

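/*
 * Register a vcpu in the basic or extended SCA and point its SIE control
 * block at the SCA origin. The read lock only guards against a concurrent
 * switch from the basic to the extended SCA.
 */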
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		return;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

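/*
 * Replace the basic SCA by an extended SCA. All vcpus are blocked first,
 * so none of them is in SIE while the SCA origin in the SIE control
 * blocks changes.
 */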
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

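/*
 * CPU timer accounting: while accounting is enabled, the guest CPU timer
 * in the SIE control block is only brought up to date lazily - the elapsed
 * host TOD delta since cputm_start is subtracted when accounting stops.
 * The seqcount allows other vcpu threads to read a consistent value
 * without locking, see kvm_s390_get_cpu_timer().
 */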
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	gmap_enable(vcpu->arch.enabled_gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

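/*
 * Final vcpu setup: populate the SIE control block (cpuflags, ECB/ECA/ECD
 * bits, interception controls, CMMA buffer) according to host facilities
 * and the configured CPU model.
 */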
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002409int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2410{
Dominik Dingelb31605c2014-03-25 13:47:11 +01002411 int rc = 0;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002412
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01002413 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2414 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002415 CPUSTAT_STOPPED);
2416
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002417 if (test_kvm_facility(vcpu->kvm, 78))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002418 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01002419 else if (test_kvm_facility(vcpu->kvm, 8))
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002420 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02002421
Michael Mueller91520f12015-02-27 14:32:11 +01002422 kvm_s390_vcpu_setup_model(vcpu);
2423
David Hildenbrandbdab09f2016-04-12 11:07:49 +02002424 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2425 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002426 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01002427 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002428 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02002429 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002430 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002431
David Hildenbrand873b4252016-04-04 15:53:47 +02002432 if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002433 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02002434 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002435 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2436 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02002437 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002438 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02002439 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002440 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002441 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002442 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002443 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002444 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01002445 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002446 vcpu->arch.sie_block->eca |= ECA_VX;
2447 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04002448 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04002449 if (test_kvm_facility(vcpu->kvm, 139))
2450 vcpu->arch.sie_block->ecd |= ECD_MEF;
2451
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01002452 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2453 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08002454 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05002455
2456 if (sclp.has_kss)
2457 atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
2458 else
2459 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05002460
Dominik Dingele6db1d62015-05-07 15:41:57 +02002461 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01002462 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2463 if (rc)
2464 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02002465 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01002466 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02002467 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002468
Tony Krowiak5102ee82014-06-27 14:46:01 -04002469 kvm_s390_vcpu_crypto_setup(vcpu);
2470
Dominik Dingelb31605c2014-03-25 13:47:11 +01002471 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002472}
2473
2474struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2475 unsigned int id)
2476{
Carsten Otte4d475552011-10-18 12:27:12 +02002477 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002478 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02002479 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002480
David Hildenbrand42158252015-10-12 12:57:22 +02002481 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02002482 goto out;
2483
2484 rc = -ENOMEM;
2485
Michael Muellerb110fea2013-06-12 13:54:54 +02002486 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002487 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02002488 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002489
QingFeng Haoda72ca42017-06-07 11:41:19 +02002490 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002491 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
2492 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002493 goto out_free_cpu;
2494
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002495 vcpu->arch.sie_block = &sie_page->sie_block;
2496 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2497
David Hildenbrandefed1102015-04-16 12:32:41 +02002498 /* the real guest size will always be smaller than msl */
2499 vcpu->arch.sie_block->mso = 0;
2500 vcpu->arch.sie_block->msl = sclp.hamax;
2501
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002502 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002503 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002504 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02002505 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01002506 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002507 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002508
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002509 rc = kvm_vcpu_init(vcpu, kvm, id);
2510 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002511 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002512 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002513 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02002514 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002515
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002516 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08002517out_free_sie_block:
2518 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002519out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02002520 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02002521out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002522 return ERR_PTR(rc);
2523}
2524
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002525int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
2526{
David Hildenbrand9a022062014-08-05 17:40:47 +02002527 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002528}
2529
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002530bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
2531{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08002532 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002533}
2534
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002535void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002536{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002537 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002538 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002539}
2540
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002541void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002542{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002543 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002544}
2545
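/*
 * Like PROG_BLOCK_SIE, PROG_REQUEST keeps the vcpu out of SIE while a
 * synchronous request is pending; it is cleared again by
 * kvm_s390_vcpu_request_handled() when the request loop picks it up.
 */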
Christian Borntraeger8e236542015-04-09 13:49:04 +02002546static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
2547{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002548 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002549 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002550}
2551
2552static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
2553{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04002554 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002555}
2556
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002557/*
2558 * Kick a guest cpu out of SIE and wait until SIE is not running.
2559 * If the CPU is not running (e.g. waiting as idle), the function
2560 * will return immediately.
 */
2561void exit_sie(struct kvm_vcpu *vcpu)
2562{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002563 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002564 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
2565 cpu_relax();
2566}
2567
Christian Borntraeger8e236542015-04-09 13:49:04 +02002568/* Kick a guest cpu out of SIE to process a request synchronously */
2569void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002570{
Christian Borntraeger8e236542015-04-09 13:49:04 +02002571 kvm_make_request(req, vcpu);
2572 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002573}
2574
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002575static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2576 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002577{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002578 struct kvm *kvm = gmap->private;
2579 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002580 unsigned long prefix;
2581 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002582
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02002583 if (gmap_is_shadow(gmap))
2584 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002585 if (start >= 1UL << 31)
2586 /* We are only interested in prefix pages */
2587 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002588 kvm_for_each_vcpu(i, vcpu, kvm) {
2589 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002590 prefix = kvm_s390_get_prefix(vcpu);
2591 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2592 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2593 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002594 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002595 }
2596 }
2597}
2598
Christoffer Dallb6d33832012-03-08 16:44:24 -05002599int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2600{
2601 /* kvm common code refers to this, but never calls it */
2602 BUG();
2603 return 0;
2604}
2605
Carsten Otte14eebd92012-05-15 14:15:26 +02002606static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2607 struct kvm_one_reg *reg)
2608{
2609 int r = -EINVAL;
2610
2611 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002612 case KVM_REG_S390_TODPR:
2613 r = put_user(vcpu->arch.sie_block->todpr,
2614 (u32 __user *)reg->addr);
2615 break;
2616 case KVM_REG_S390_EPOCHDIFF:
2617 r = put_user(vcpu->arch.sie_block->epoch,
2618 (u64 __user *)reg->addr);
2619 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002620 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002621 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02002622 (u64 __user *)reg->addr);
2623 break;
2624 case KVM_REG_S390_CLOCK_COMP:
2625 r = put_user(vcpu->arch.sie_block->ckc,
2626 (u64 __user *)reg->addr);
2627 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002628 case KVM_REG_S390_PFTOKEN:
2629 r = put_user(vcpu->arch.pfault_token,
2630 (u64 __user *)reg->addr);
2631 break;
2632 case KVM_REG_S390_PFCOMPARE:
2633 r = put_user(vcpu->arch.pfault_compare,
2634 (u64 __user *)reg->addr);
2635 break;
2636 case KVM_REG_S390_PFSELECT:
2637 r = put_user(vcpu->arch.pfault_select,
2638 (u64 __user *)reg->addr);
2639 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002640 case KVM_REG_S390_PP:
2641 r = put_user(vcpu->arch.sie_block->pp,
2642 (u64 __user *)reg->addr);
2643 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002644 case KVM_REG_S390_GBEA:
2645 r = put_user(vcpu->arch.sie_block->gbea,
2646 (u64 __user *)reg->addr);
2647 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002648 default:
2649 break;
2650 }
2651
2652 return r;
2653}
2654
2655static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2656 struct kvm_one_reg *reg)
2657{
2658 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01002659 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02002660
2661 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002662 case KVM_REG_S390_TODPR:
2663 r = get_user(vcpu->arch.sie_block->todpr,
2664 (u32 __user *)reg->addr);
2665 break;
2666 case KVM_REG_S390_EPOCHDIFF:
2667 r = get_user(vcpu->arch.sie_block->epoch,
2668 (u64 __user *)reg->addr);
2669 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002670 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002671 r = get_user(val, (u64 __user *)reg->addr);
2672 if (!r)
2673 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02002674 break;
2675 case KVM_REG_S390_CLOCK_COMP:
2676 r = get_user(vcpu->arch.sie_block->ckc,
2677 (u64 __user *)reg->addr);
2678 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002679 case KVM_REG_S390_PFTOKEN:
2680 r = get_user(vcpu->arch.pfault_token,
2681 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002682 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2683 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02002684 break;
2685 case KVM_REG_S390_PFCOMPARE:
2686 r = get_user(vcpu->arch.pfault_compare,
2687 (u64 __user *)reg->addr);
2688 break;
2689 case KVM_REG_S390_PFSELECT:
2690 r = get_user(vcpu->arch.pfault_select,
2691 (u64 __user *)reg->addr);
2692 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002693 case KVM_REG_S390_PP:
2694 r = get_user(vcpu->arch.sie_block->pp,
2695 (u64 __user *)reg->addr);
2696 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002697 case KVM_REG_S390_GBEA:
2698 r = get_user(vcpu->arch.sie_block->gbea,
2699 (u64 __user *)reg->addr);
2700 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002701 default:
2702 break;
2703 }
2704
2705 return r;
2706}
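
/*
 * Illustrative only, not part of the original source: userspace reaches
 * the two one_reg handlers above via the generic KVM_GET/SET_ONE_REG
 * ioctls. A minimal sketch, assuming vcpu_fd is an open vcpu fd:
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&val,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
 *		perror("KVM_GET_ONE_REG");
 */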
Christoffer Dallb6d33832012-03-08 16:44:24 -05002707
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002708static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2709{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002710 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002711 return 0;
2712}
2713
2714int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2715{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002716 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002717 return 0;
2718}
2719
2720int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2721{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002722 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002723 return 0;
2724}
2725
2726int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2727 struct kvm_sregs *sregs)
2728{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002729 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002730 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002731 return 0;
2732}
2733
2734int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2735 struct kvm_sregs *sregs)
2736{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002737 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002738 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002739 return 0;
2740}
2741
2742int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2743{
Martin Schwidefsky4725c862013-10-15 16:08:34 +02002744 if (test_fp_ctl(fpu->fpc))
2745 return -EINVAL;
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002746 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002747 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002748 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
2749 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002750 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002751 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002752 return 0;
2753}
2754
2755int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2756{
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002757 /* make sure we have the latest values */
2758 save_fpu_regs();
2759 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002760 convert_vx_to_fp((freg_t *) fpu->fprs,
2761 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002762 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002763 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002764 fpu->fpc = vcpu->run->s.regs.fpc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002765 return 0;
2766}
2767
2768static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2769{
2770 int rc = 0;
2771
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02002772 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002773 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002774 else {
2775 vcpu->run->psw_mask = psw.mask;
2776 vcpu->run->psw_addr = psw.addr;
2777 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002778 return rc;
2779}
2780
2781int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2782 struct kvm_translation *tr)
2783{
2784 return -EINVAL; /* not implemented yet */
2785}
2786
David Hildenbrand27291e22014-01-23 12:26:52 +01002787#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2788 KVM_GUESTDBG_USE_HW_BP | \
2789 KVM_GUESTDBG_ENABLE)
2790
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002791int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2792 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002793{
David Hildenbrand27291e22014-01-23 12:26:52 +01002794 int rc = 0;
2795
2796 vcpu->guest_debug = 0;
2797 kvm_s390_clear_bp_data(vcpu);
2798
David Hildenbrand2de3bfc2014-05-20 17:25:20 +02002799 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
David Hildenbrand27291e22014-01-23 12:26:52 +01002800 return -EINVAL;
David Hildenbrand89b5b4d2015-11-24 13:47:13 +01002801 if (!sclp.has_gpere)
2802 return -EINVAL;
David Hildenbrand27291e22014-01-23 12:26:52 +01002803
2804 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2805 vcpu->guest_debug = dbg->control;
2806 /* enforce guest PER */
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002807 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002808
2809 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2810 rc = kvm_s390_import_bp_data(vcpu, dbg);
2811 } else {
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002812 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002813 vcpu->arch.guestdbg.last_bp = 0;
2814 }
2815
2816 if (rc) {
2817 vcpu->guest_debug = 0;
2818 kvm_s390_clear_bp_data(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002819 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002820 }
2821
2822 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002823}
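
/*
 * Illustrative only, not part of the original source: a minimal sketch of
 * how userspace might arm the handler above, assuming vcpu_fd is an open
 * vcpu fd (hardware breakpoints would additionally need dbg.arch to point
 * at an array of struct kvm_hw_breakpoint):
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg))
 *		perror("KVM_SET_GUEST_DEBUG");
 */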
2824
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002825int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2826 struct kvm_mp_state *mp_state)
2827{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002828 /* CHECK_STOP and LOAD are not supported yet */
2829 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2830 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002831}
2832
2833int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2834 struct kvm_mp_state *mp_state)
2835{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002836 int rc = 0;
2837
2838 /* user space knows about this interface - let it control the state */
2839 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2840
2841 switch (mp_state->mp_state) {
2842 case KVM_MP_STATE_STOPPED:
2843 kvm_s390_vcpu_stop(vcpu);
2844 break;
2845 case KVM_MP_STATE_OPERATING:
2846 kvm_s390_vcpu_start(vcpu);
2847 break;
2848 case KVM_MP_STATE_LOAD:
2849 case KVM_MP_STATE_CHECK_STOP:
2850 /* fall through - CHECK_STOP and LOAD are not supported yet */
2851 default:
2852 rc = -ENXIO;
2853 }
2854
2855 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002856}
2857
David Hildenbrand8ad35752014-03-14 11:00:21 +01002858static bool ibs_enabled(struct kvm_vcpu *vcpu)
2859{
2860 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2861}
2862
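/*
 * Process all requests pending for this vcpu before (re-)entering SIE.
 * Returns 0 if SIE may be entered, a negative error code otherwise.
 */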
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002863static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2864{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002865retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02002866 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02002867 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02002868 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002869 /*
2870 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002871 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002872 * This ensures that the ipte instruction for this request has
2873 * already finished. We might race against a second unmapper that
2874	 * wants to set the blocking bit. Let's just retry the request loop.
2875 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01002876 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002877 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002878 rc = gmap_mprotect_notify(vcpu->arch.gmap,
2879 kvm_s390_get_prefix(vcpu),
2880 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02002881 if (rc) {
2882 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002883 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02002884 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002885 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002886 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002887
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002888 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2889 vcpu->arch.sie_block->ihcpu = 0xffff;
2890 goto retry;
2891 }
2892
David Hildenbrand8ad35752014-03-14 11:00:21 +01002893 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2894 if (!ibs_enabled(vcpu)) {
2895 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002896 atomic_or(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002897 &vcpu->arch.sie_block->cpuflags);
2898 }
2899 goto retry;
2900 }
2901
2902 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2903 if (ibs_enabled(vcpu)) {
2904 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002905 atomic_andnot(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002906 &vcpu->arch.sie_block->cpuflags);
2907 }
2908 goto retry;
2909 }
2910
David Hildenbrand6502a342016-06-21 14:19:51 +02002911 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
2912 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2913 goto retry;
2914 }
2915
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02002916 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
2917 /*
2918 * Disable CMMA virtualization; we will emulate the ESSA
2919 * instruction manually, in order to provide additional
2920 * functionalities needed for live migration.
2921 */
2922 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
2923 goto retry;
2924 }
2925
2926 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
2927 /*
2928 * Re-enable CMMA virtualization if CMMA is available and
2929 * was used.
2930 */
2931 if ((vcpu->kvm->arch.use_cmma) &&
2932 (vcpu->kvm->mm->context.use_cmma))
2933 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
2934 goto retry;
2935 }
2936
David Hildenbrand0759d062014-05-13 16:54:32 +02002937 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02002938 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02002939
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002940 return 0;
2941}
2942
Collin L. Walling8fa16962016-07-26 15:29:44 -04002943void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
2944 const struct kvm_s390_vm_tod_clock *gtod)
2945{
2946 struct kvm_vcpu *vcpu;
2947 struct kvm_s390_tod_clock_ext htod;
2948 int i;
2949
2950 mutex_lock(&kvm->lock);
2951 preempt_disable();
2952
2953 get_tod_clock_ext((char *)&htod);
2954
2955 kvm->arch.epoch = gtod->tod - htod.tod;
2956 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
2957
2958 if (kvm->arch.epoch > gtod->tod)
2959 kvm->arch.epdx -= 1;
2960
2961 kvm_s390_vcpu_block_all(kvm);
2962 kvm_for_each_vcpu(i, vcpu, kvm) {
2963 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2964 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
2965 }
2966
2967 kvm_s390_vcpu_unblock_all(kvm);
2968 preempt_enable();
2969 mutex_unlock(&kvm->lock);
2970}
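
/*
 * A note on the borrow handling above: guest and host clocks are 128-bit
 * values (epoch_idx:tod). The epoch is computed by subtracting the low
 * 64-bit words; if that subtraction wraps, the difference ends up larger
 * than the minuend (kvm->arch.epoch > gtod->tod), so a borrow has to be
 * taken from the epoch index, hence the "epdx -= 1".
 */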
2971
David Hildenbrand25ed1672015-05-12 09:49:14 +02002972void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2973{
2974 struct kvm_vcpu *vcpu;
2975 int i;
2976
2977 mutex_lock(&kvm->lock);
2978 preempt_disable();
2979 kvm->arch.epoch = tod - get_tod_clock();
2980 kvm_s390_vcpu_block_all(kvm);
2981 kvm_for_each_vcpu(i, vcpu, kvm)
2982 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2983 kvm_s390_vcpu_unblock_all(kvm);
2984 preempt_enable();
2985 mutex_unlock(&kvm->lock);
2986}
2987
Thomas Huthfa576c52014-05-06 17:20:16 +02002988/**
2989 * kvm_arch_fault_in_page - fault-in guest page if necessary
2990 * @vcpu: The corresponding virtual cpu
2991 * @gpa: Guest physical address
2992 * @writable: Whether the page should be writable or not
2993 *
2994 * Make sure that a guest page has been faulted-in on the host.
2995 *
2996 * Return: Zero on success, negative error code otherwise.
2997 */
2998long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002999{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003000 return gmap_fault(vcpu->arch.gmap, gpa,
3001 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003002}
3003
Dominik Dingel3c038e62013-10-07 17:11:48 +02003004static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3005 unsigned long token)
3006{
3007 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003008 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003009
3010 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003011 irq.u.ext.ext_params2 = token;
3012 irq.type = KVM_S390_INT_PFAULT_INIT;
3013 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003014 } else {
3015 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003016 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003017 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3018 }
3019}
3020
3021void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3022 struct kvm_async_pf *work)
3023{
3024 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3025 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3026}
3027
3028void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3029 struct kvm_async_pf *work)
3030{
3031 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3032 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3033}
3034
3035void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3036 struct kvm_async_pf *work)
3037{
3038 /* s390 will always inject the page directly */
3039}
3040
3041bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3042{
3043 /*
3044 * s390 will always inject the page directly,
3045	 * but we still want check_async_completion to clean up
3046 */
3047 return true;
3048}
3049
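/*
 * Try to turn the current host fault into an async pfault: returns
 * nonzero if the fault was queued for asynchronous handling, 0 if the
 * guest state (pfault token, PSW/CR0 masks, pending irqs) requires
 * synchronous handling instead.
 */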
3050static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3051{
3052 hva_t hva;
3053 struct kvm_arch_async_pf arch;
3054 int rc;
3055
3056 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3057 return 0;
3058 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3059 vcpu->arch.pfault_compare)
3060 return 0;
3061 if (psw_extint_disabled(vcpu))
3062 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02003063 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003064 return 0;
3065 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
3066 return 0;
3067 if (!vcpu->arch.gmap->pfault_enabled)
3068 return 0;
3069
Heiko Carstens81480cc2014-01-01 16:36:07 +01003070 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3071 hva += current->thread.gmap_addr & ~PAGE_MASK;
3072 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003073 return 0;
3074
3075 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3076 return rc;
3077}
3078
Thomas Huth3fb4c402013-09-12 10:33:43 +02003079static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003080{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003081 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003082
Dominik Dingel3c038e62013-10-07 17:11:48 +02003083 /*
3084 * On s390 notifications for arriving pages will be delivered directly
3085	 * to the guest but the housekeeping for completed pfaults is
3086 * handled outside the worker.
3087 */
3088 kvm_check_async_pf_completion(vcpu);
3089
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003090 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3091 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003092
3093 if (need_resched())
3094 schedule();
3095
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02003096 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02003097 s390_handle_mcck();
3098
Jens Freimann79395032014-04-17 10:10:30 +02003099 if (!kvm_is_ucontrol(vcpu->kvm)) {
3100 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3101 if (rc)
3102 return rc;
3103 }
Carsten Otte0ff31862008-05-21 13:37:37 +02003104
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003105 rc = kvm_s390_handle_requests(vcpu);
3106 if (rc)
3107 return rc;
3108
David Hildenbrand27291e22014-01-23 12:26:52 +01003109 if (guestdbg_enabled(vcpu)) {
3110 kvm_s390_backup_guest_per_regs(vcpu);
3111 kvm_s390_patch_guest_per_regs(vcpu);
3112 }
3113
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003114 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003115 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3116 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3117 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003118
Thomas Huth3fb4c402013-09-12 10:33:43 +02003119 return 0;
3120}
3121
Thomas Huth492d8642015-02-10 16:11:01 +01003122static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3123{
David Hildenbrand56317922016-01-12 17:37:58 +01003124 struct kvm_s390_pgm_info pgm_info = {
3125 .code = PGM_ADDRESSING,
3126 };
3127 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01003128 int rc;
3129
3130 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3131 trace_kvm_s390_sie_fault(vcpu);
3132
3133 /*
3134 * We want to inject an addressing exception, which is defined as a
3135 * suppressing or terminating exception. However, since we came here
3136	 * via a DAT access exception, the PSW still points to the faulting
3137	 * instruction, because DAT exceptions are nullifying. So we've got
3138 * to look up the current opcode to get the length of the instruction
3139 * to be able to forward the PSW.
3140 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02003141 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01003142 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01003143 if (rc < 0) {
3144 return rc;
3145 } else if (rc) {
3146 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3147 * Forward by arbitrary ilc, injection will take care of
3148 * nullification if necessary.
3149 */
3150 pgm_info = vcpu->arch.pgm;
3151 ilen = 4;
3152 }
David Hildenbrand56317922016-01-12 17:37:58 +01003153 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3154 kvm_s390_forward_psw(vcpu, ilen);
3155 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01003156}
3157
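/*
 * Triage a SIE exit: host machine checks (exit_reason == -EINTR),
 * intercepts, ucontrol faults and guest page faults. Returns 0 to
 * re-enter SIE, -EREMOTE to exit to userspace with a prepared kvm_run,
 * or a negative error code.
 */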
Thomas Huth3fb4c402013-09-12 10:33:43 +02003158static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3159{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003160 struct mcck_volatile_info *mcck_info;
3161 struct sie_page *sie_page;
3162
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003163 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3164 vcpu->arch.sie_block->icptcode);
3165 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3166
David Hildenbrand27291e22014-01-23 12:26:52 +01003167 if (guestdbg_enabled(vcpu))
3168 kvm_s390_restore_guest_per_regs(vcpu);
3169
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003170 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3171 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003172
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003173 if (exit_reason == -EINTR) {
3174 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3175 sie_page = container_of(vcpu->arch.sie_block,
3176 struct sie_page, sie_block);
3177 mcck_info = &sie_page->mcck_info;
3178 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3179 return 0;
3180 }
3181
David Hildenbrand71f116b2015-10-19 16:24:28 +02003182 if (vcpu->arch.sie_block->icptcode > 0) {
3183 int rc = kvm_handle_sie_intercept(vcpu);
3184
3185 if (rc != -EOPNOTSUPP)
3186 return rc;
3187 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3188 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3189 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3190 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3191 return -EREMOTE;
3192 } else if (exit_reason != -EFAULT) {
3193 vcpu->stat.exit_null++;
3194 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02003195 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3196 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3197 vcpu->run->s390_ucontrol.trans_exc_code =
3198 current->thread.gmap_addr;
3199 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003200 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003201 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02003202 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003203 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003204 if (kvm_arch_setup_async_pf(vcpu))
3205 return 0;
3206 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003207 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02003208 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003209}
3210
3211static int __vcpu_run(struct kvm_vcpu *vcpu)
3212{
3213 int rc, exit_reason;
3214
Thomas Huth800c1062013-09-12 10:33:45 +02003215 /*
3216 * We try to hold kvm->srcu during most of vcpu_run (except when run-
3217 * ning the guest), so that memslots (and other stuff) are protected
3218 */
3219 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3220
Thomas Hutha76ccff2013-09-12 10:33:44 +02003221 do {
3222 rc = vcpu_pre_run(vcpu);
3223 if (rc)
3224 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003225
Thomas Huth800c1062013-09-12 10:33:45 +02003226 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02003227 /*
3228	 * As PF_VCPU will be used in the fault handler, there should be
3229	 * no uaccess between guest_enter and guest_exit.
3230 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02003231 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003232 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003233 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003234 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003235 exit_reason = sie64a(vcpu->arch.sie_block,
3236 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003237 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003238 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003239 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02003240 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02003241 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003242
Thomas Hutha76ccff2013-09-12 10:33:44 +02003243 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01003244 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003245
Thomas Huth800c1062013-09-12 10:33:45 +02003246 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01003247 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003248}
3249
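/*
 * Load guest register state from the kvm_run area into the vcpu/SIE
 * block before entering SIE, honoring kvm_run->kvm_dirty_regs, and
 * stash the host (userspace) access, fp/vector and guarded-storage
 * registers so store_regs() can restore them later.
 */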
David Hildenbrandb028ee32014-07-17 10:47:43 +02003250static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3251{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003252 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003253 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003254
3255 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003256 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02003257 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3258 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3259 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3260 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3261 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3262 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003263 /* some control register changes require a tlb flush */
3264 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003265 }
3266 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01003267 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003268 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3269 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3270 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3271 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3272 }
3273 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3274 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3275 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3276 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003277 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3278 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003279 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003280 /*
3281 * If userspace sets the riccb (e.g. after migration) to a valid state,
3282 * we should enable RI here instead of doing the lazy enablement.
3283 */
3284 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003285 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02003286 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003287 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003288 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003289 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02003290 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003291 /*
3292 * If userspace sets the gscb (e.g. after migration) to non-zero,
3293 * we should enable GS here instead of doing the lazy enablement.
3294 */
3295 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3296 test_kvm_facility(vcpu->kvm, 133) &&
3297 gscb->gssm &&
3298 !vcpu->arch.gs_enabled) {
3299 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3300 vcpu->arch.sie_block->ecb |= ECB_GS;
3301 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3302 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02003303 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003304 save_access_regs(vcpu->arch.host_acrs);
3305 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003306 /* save host (userspace) fprs/vrs */
3307 save_fpu_regs();
3308 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3309 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3310 if (MACHINE_HAS_VX)
3311 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3312 else
3313 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3314 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3315 if (test_fp_ctl(current->thread.fpu.fpc))
3316 /* User space provided an invalid FPC, let's clear it */
3317 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003318 if (MACHINE_HAS_GS) {
3319 preempt_disable();
3320 __ctl_set_bit(2, 4);
3321 if (current->thread.gs_cb) {
3322 vcpu->arch.host_gscb = current->thread.gs_cb;
3323 save_gs_cb(vcpu->arch.host_gscb);
3324 }
3325 if (vcpu->arch.gs_enabled) {
3326 current->thread.gs_cb = (struct gs_cb *)
3327 &vcpu->run->s.regs.gscb;
3328 restore_gs_cb(current->thread.gs_cb);
3329 }
3330 preempt_enable();
3331 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003332
David Hildenbrandb028ee32014-07-17 10:47:43 +02003333 kvm_run->kvm_dirty_regs = 0;
3334}
3335
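/*
 * Counterpart of sync_regs(): copy guest register state back into the
 * kvm_run area for userspace and restore the saved host registers.
 */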
3336static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3337{
3338 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3339 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3340 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3341 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003342 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003343 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3344 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3345 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3346 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3347 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3348 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3349 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003350 save_access_regs(vcpu->run->s.regs.acrs);
3351 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003352 /* Save guest register state */
3353 save_fpu_regs();
3354 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3355 /* Restore will be done lazily at return */
3356 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3357 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003358 if (MACHINE_HAS_GS) {
3359 __ctl_set_bit(2, 4);
3360 if (vcpu->arch.gs_enabled)
3361 save_gs_cb(current->thread.gs_cb);
3362 preempt_disable();
3363 current->thread.gs_cb = vcpu->arch.host_gscb;
3364 restore_gs_cb(vcpu->arch.host_gscb);
3365 preempt_enable();
3366 if (!vcpu->arch.host_gscb)
3367 __ctl_clear_bit(2, 4);
3368 vcpu->arch.host_gscb = NULL;
3369 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003370
David Hildenbrandb028ee32014-07-17 10:47:43 +02003371}
3372
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003373int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3374{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003375 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003376 sigset_t sigsaved;
3377
Paolo Bonzini460df4c2017-02-08 11:50:15 +01003378 if (kvm_run->immediate_exit)
3379 return -EINTR;
3380
David Hildenbrand27291e22014-01-23 12:26:52 +01003381 if (guestdbg_exit_pending(vcpu)) {
3382 kvm_s390_prepare_debug_exit(vcpu);
3383 return 0;
3384 }
3385
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003386 if (vcpu->sigset_active)
3387 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3388
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003389 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3390 kvm_s390_vcpu_start(vcpu);
3391 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003392 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003393 vcpu->vcpu_id);
3394 return -EINVAL;
3395 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003396
David Hildenbrandb028ee32014-07-17 10:47:43 +02003397 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003398 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003399
Heiko Carstensdab4079d2009-06-12 10:26:32 +02003400 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003401 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02003402
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003403 if (signal_pending(current) && !rc) {
3404 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003405 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003406 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003407
David Hildenbrand27291e22014-01-23 12:26:52 +01003408 if (guestdbg_exit_pending(vcpu) && !rc) {
3409 kvm_s390_prepare_debug_exit(vcpu);
3410 rc = 0;
3411 }
3412
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003413 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02003414 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003415 rc = 0;
3416 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003417
David Hildenbranddb0758b2016-02-15 09:42:25 +01003418 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003419 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003420
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003421 if (vcpu->sigset_active)
3422 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3423
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003424 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02003425 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003426}
3427
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003428/*
3429 * store status at address
3430 * we have two special cases:
3431 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
3432 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
3433 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01003434int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003435{
Carsten Otte092670c2011-07-24 10:48:22 +02003436 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003437 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02003438 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01003439 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003440 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003441
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003442 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01003443 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
3444 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003445 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003446 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003447 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
3448 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003449 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003450 gpa = px;
3451 } else
3452 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003453
3454 /* manually convert vector registers if necessary */
3455 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01003456 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003457 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
3458 fprs, 128);
3459 } else {
3460 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01003461 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003462 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003463 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003464 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003465 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003466 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003467 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02003468 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003469 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003470 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003471 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003472 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01003473 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003474 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01003475 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01003476 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003477 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003478 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003479 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003480 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003481 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003482 &vcpu->arch.sie_block->gcr, 128);
3483 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003484}
3485
Thomas Huthe8798922013-11-06 15:46:33 +01003486int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
3487{
3488 /*
3489 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003490 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01003491	 * them into the save area.
3492 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02003493 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003494 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01003495 save_access_regs(vcpu->run->s.regs.acrs);
3496
3497 return kvm_s390_store_status_unloaded(vcpu, addr);
3498}
3499
David Hildenbrand8ad35752014-03-14 11:00:21 +01003500static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3501{
3502 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003503 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003504}
3505
3506static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
3507{
3508 unsigned int i;
3509 struct kvm_vcpu *vcpu;
3510
3511 kvm_for_each_vcpu(i, vcpu, kvm) {
3512 __disable_ibs_on_vcpu(vcpu);
3513 }
3514}
3515
3516static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3517{
David Hildenbrand09a400e2016-04-04 15:57:08 +02003518 if (!sclp.has_ibs)
3519 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01003520 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003521 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003522}
3523
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003524void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
3525{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003526 int i, online_vcpus, started_vcpus = 0;
3527
3528 if (!is_vcpu_stopped(vcpu))
3529 return;
3530
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003531 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003532 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003533 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003534 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3535
3536 for (i = 0; i < online_vcpus; i++) {
3537 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
3538 started_vcpus++;
3539 }
3540
3541 if (started_vcpus == 0) {
3542 /* we're the only active VCPU -> speed it up */
3543 __enable_ibs_on_vcpu(vcpu);
3544 } else if (started_vcpus == 1) {
3545 /*
3546 * As we are starting a second VCPU, we have to disable
3547 * the IBS facility on all VCPUs to remove potentially
3548	 * outstanding ENABLE requests.
3549 */
3550 __disable_ibs_on_all_vcpus(vcpu->kvm);
3551 }
3552
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003553 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003554 /*
3555 * Another VCPU might have used IBS while we were offline.
3556 * Let's play safe and flush the VCPU at startup.
3557 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003558 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003559 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003560 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003561}
3562
3563void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
3564{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003565 int i, online_vcpus, started_vcpus = 0;
3566 struct kvm_vcpu *started_vcpu = NULL;
3567
3568 if (is_vcpu_stopped(vcpu))
3569 return;
3570
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003571 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003572 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003573 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003574 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3575
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003576 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02003577 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003578
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003579 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003580 __disable_ibs_on_vcpu(vcpu);
3581
3582 for (i = 0; i < online_vcpus; i++) {
3583 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3584 started_vcpus++;
3585 started_vcpu = vcpu->kvm->vcpus[i];
3586 }
3587 }
3588
3589 if (started_vcpus == 1) {
3590 /*
3591 * As we only have one VCPU left, we want to enable the
3592 * IBS facility for that VCPU to speed it up.
3593 */
3594 __enable_ibs_on_vcpu(started_vcpu);
3595 }
3596
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003597 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003598 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003599}
3600
Cornelia Huckd6712df2012-12-20 15:32:11 +01003601static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3602 struct kvm_enable_cap *cap)
3603{
3604 int r;
3605
3606 if (cap->flags)
3607 return -EINVAL;
3608
3609 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003610 case KVM_CAP_S390_CSS_SUPPORT:
3611 if (!vcpu->kvm->arch.css_support) {
3612 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02003613 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003614 trace_kvm_s390_enable_css(vcpu->kvm);
3615 }
3616 r = 0;
3617 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01003618 default:
3619 r = -EINVAL;
3620 break;
3621 }
3622 return r;
3623}
3624
Thomas Huth41408c282015-02-06 15:01:21 +01003625static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
3626 struct kvm_s390_mem_op *mop)
3627{
3628 void __user *uaddr = (void __user *)mop->buf;
3629 void *tmpbuf = NULL;
3630 int r, srcu_idx;
3631 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
3632 | KVM_S390_MEMOP_F_CHECK_ONLY;
3633
3634 if (mop->flags & ~supported_flags)
3635 return -EINVAL;
3636
3637 if (mop->size > MEM_OP_MAX_SIZE)
3638 return -E2BIG;
3639
3640 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
3641 tmpbuf = vmalloc(mop->size);
3642 if (!tmpbuf)
3643 return -ENOMEM;
3644 }
3645
3646 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3647
3648 switch (mop->op) {
3649 case KVM_S390_MEMOP_LOGICAL_READ:
3650 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003651 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3652 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01003653 break;
3654 }
3655 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3656 if (r == 0) {
3657 if (copy_to_user(uaddr, tmpbuf, mop->size))
3658 r = -EFAULT;
3659 }
3660 break;
3661 case KVM_S390_MEMOP_LOGICAL_WRITE:
3662 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003663 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3664 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01003665 break;
3666 }
3667 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
3668 r = -EFAULT;
3669 break;
3670 }
3671 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3672 break;
3673 default:
3674 r = -EINVAL;
3675 }
3676
3677 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
3678
3679 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
3680 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
3681
3682 vfree(tmpbuf);
3683 return r;
3684}
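
/*
 * Illustrative only, not part of the original source: a userspace sketch
 * driving the mem_op handler above, assuming vcpu_fd is an open vcpu fd
 * and gaddr is a guest logical address supplied by the caller:
 *
 *	__u8 buf[512];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = gaddr,
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)buf,
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) < 0)
 *		perror("KVM_S390_MEM_OP");
 */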

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
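
/*
 * Illustrative sketch (not part of this file): injecting an interrupt via
 * the KVM_S390_IRQ case above. vcpu_fd is an assumed, previously created
 * vCPU file descriptor; the struct and type constant are the uapi
 * definitions from <linux/kvm.h>.
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_RESTART,	// restart needs no payload
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_IRQ, &irq) < 0)
 *		perror("KVM_S390_IRQ");
 *
 * KVM_S390_INTERRUPT is the older variant of the same operation: its
 * kvm_s390_interrupt payload is converted by s390int_to_s390irq() and
 * then fed to the identical injection path.
 */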

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
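
/*
 * Illustrative sketch (not part of this file): with CONFIG_KVM_S390_UCONTROL,
 * userspace can map the SIE control block of a vCPU by mmap()ing the vCPU fd
 * at page offset KVM_S390_SIE_PAGE_OFFSET; the fault handler above supplies
 * the page. vcpu_fd and page_size are assumptions for the example.
 *
 *	void *sie = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, vcpu_fd,
 *			 KVM_S390_SIE_PAGE_OFFSET * page_size);
 *
 * For non-ucontrol guests, or for any other offset, the handler returns
 * VM_FAULT_SIGBUS, so touching such a mapping raises SIGBUS.
 */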

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end on a
	 * segment (1 MB) boundary. The backing memory in userland may be
	 * fragmented across any number of vmas, and it is fine to mmap()
	 * and munmap() ranges within this slot at any time after this call.
	 */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}
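
/*
 * Illustrative sketch (not part of this file): a memslot registration that
 * satisfies the checks above. Both userspace_addr and memory_size are
 * segment (1 MB) aligned; vm_fd and mem are assumptions for the example.
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 256 << 20,			// multiple of 1 MB
 *		.userspace_addr  = (__u64)(unsigned long)mem,	// 1 MB aligned mapping
 *	};
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
 *		perror("KVM_SET_USER_MEMORY_REGION");
 *
 * An unaligned address or size, or a slot extending past
 * kvm->arch.mem_limit, is rejected with -EINVAL before the commit stage.
 */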

void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}
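
/*
 * Worked example for nonhyp_mask() above (the arithmetic is exact; the
 * meaning of the 2-bit hmfai fields is defined by the SCLP interface):
 * for facility-list word i, the i-th 2-bit field is extracted from the
 * top of sclp.hmfai, and each increment of nonhyp_fai shifts the mask
 * right by 16 bits, blocking 16 more facility bits from the start of
 * that word:
 *
 *	nonhyp_fai = 0  ->  0x0000ffffffffffffUL
 *	nonhyp_fai = 1  ->  0x00000000ffffffffUL
 *	nonhyp_fai = 2  ->  0x000000000000ffffUL
 *	nonhyp_fai = 3  ->  0x0000000000000000UL
 *
 * kvm_s390_init() below ANDs each stfle facility word with this mask, so
 * only facilities usable without hypervisor assistance are ORed into
 * kvm_s390_fac_list_mask[].
 */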

void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_list_mask[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");