/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

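/*
 * Each entry exposes one vcpu counter as a file in the kvm debugfs
 * directory. VCPU_STAT() expands to the counter's offset inside struct
 * kvm_vcpu plus the KVM_STAT_VCPU type, which the common KVM code uses
 * to sum the value across all vcpus of a VM.
 */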
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = { FACILITIES_KVM };

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				vcpu->arch.vsie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

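/*
 * Probe one PERFORM LOCKED OPERATION function code. ORing in 0x100 sets
 * the test bit in general register 0, so PLO only tests whether the
 * function code is installed instead of performing it; condition code 0
 * indicates the function is available.
 */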
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

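/*
 * Probe the host for everything that can be offered to guests: the
 * installed PLO function codes, the PTFF query functions, the CPACF
 * (crypto) query functions guarded by their MSA facility bits, and the
 * SIE features reported by SCLP (the latter only if nested virtualization
 * is enabled and the vSIE prerequisites are met).
 */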
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	default:
		r = 0;
	}
	return r;
}

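/*
 * Transfer the per-page dirty state from the host page tables into the
 * memslot's dirty bitmap: test_and_clear_guest_dirty() checks and resets
 * the dirty state of the backing page through the gmap, and dirty pages
 * are recorded via mark_page_dirty(). The walk may be long, so it
 * reschedules regularly and bails out on fatal signals.
 */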
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

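/*
 * Request interception of operation exceptions on all vcpus, so that the
 * (invalid) instruction 0x0000 can be forwarded to user space; used by
 * the KVM_CAP_S390_USER_INSTR0 handling below.
 */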
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

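/*
 * Toggle AES/DEA protected-key wrapping for the guest: enabling generates
 * a fresh random wrapping key mask in the CRYCB, disabling clears it.
 * Afterwards every vcpu's crypto setup is redone and the vcpu is kicked
 * out of SIE so the new control block contents take effect.
 */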
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

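/*
 * Make the given request on every vcpu of the VM and kick it out of SIE
 * so the request is handled before the next SIE entry.
 */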
static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;
	struct kvm_memory_slot *ms;
	/* should be the only one */
	struct kvm_memslots *slots;
	unsigned long ram_pages;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_state)
		return 0;

	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
	if (!mgs)
		return -ENOMEM;
	kvm->arch.migration_state = mgs;

	if (kvm->arch.use_cmma) {
		/*
		 * Get the last slot. They should be sorted by base_gfn, so the
		 * last slot is also the one at the end of the address space.
		 * We have verified above that at least one slot is present.
		 */
		ms = slots->memslots + slots->used_slots - 1;
		/* round up so we only use full longs */
		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
		/* allocate enough bytes to store all the bits */
		mgs->pgste_bitmap = vmalloc(ram_pages / 8);
		if (!mgs->pgste_bitmap) {
			kfree(mgs);
			kvm->arch.migration_state = NULL;
			return -ENOMEM;
		}

		mgs->bitmap_size = ram_pages;
		atomic64_set(&mgs->dirty_pages, ram_pages);
		/* mark all the pages in active slots as dirty */
		for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
			ms = slots->memslots + slotnr;
			bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
		}

		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	}
	return 0;
}

/*
 * Must be called with kvm->lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;

	/* migration mode already disabled */
	if (!kvm->arch.migration_state)
		return 0;
	mgs = kvm->arch.migration_state;
	kvm->arch.migration_state = NULL;

	if (kvm->arch.use_cmma) {
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
		vfree(mgs->pgste_bitmap);
	}
	kfree(mgs);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int idx, res = -ENXIO;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		idx = srcu_read_lock(&kvm->srcu);
		res = kvm_s390_vm_start_migration(kvm);
		srcu_read_unlock(&kvm->srcu, idx);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = (kvm->arch.migration_state != NULL);

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

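/*
 * Set the guest CPU model (cpuid, IBC, facility list), only possible while
 * no vcpus exist yet. The requested IBC value is clamped to the machine's
 * range reported by SCLP: values above the unblocked IBC are capped and
 * values below the lowest supported IBC are raised.
 */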
Michael Mueller658b6ed2015-02-02 15:49:35 +0100970static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
971{
972 struct kvm_s390_vm_cpu_processor *proc;
David Hildenbrand053dd232016-04-04 13:59:42 +0200973 u16 lowest_ibc, unblocked_ibc;
Michael Mueller658b6ed2015-02-02 15:49:35 +0100974 int ret = 0;
975
976 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +0200977 if (kvm->created_vcpus) {
Michael Mueller658b6ed2015-02-02 15:49:35 +0100978 ret = -EBUSY;
979 goto out;
980 }
981 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
982 if (!proc) {
983 ret = -ENOMEM;
984 goto out;
985 }
986 if (!copy_from_user(proc, (void __user *)attr->addr,
987 sizeof(*proc))) {
David Hildenbrand9bb0ec02016-04-04 14:27:51 +0200988 kvm->arch.model.cpuid = proc->cpuid;
David Hildenbrand053dd232016-04-04 13:59:42 +0200989 lowest_ibc = sclp.ibc >> 16 & 0xfff;
990 unblocked_ibc = sclp.ibc & 0xfff;
David Hildenbrand0487c442016-06-10 09:22:31 +0200991 if (lowest_ibc && proc->ibc) {
David Hildenbrand053dd232016-04-04 13:59:42 +0200992 if (proc->ibc > unblocked_ibc)
993 kvm->arch.model.ibc = unblocked_ibc;
994 else if (proc->ibc < lowest_ibc)
995 kvm->arch.model.ibc = lowest_ibc;
996 else
997 kvm->arch.model.ibc = proc->ibc;
998 }
David Hildenbrandc54f0d62015-12-02 08:53:52 +0100999 memcpy(kvm->arch.model.fac_list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +01001000 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001001 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1002 kvm->arch.model.ibc,
1003 kvm->arch.model.cpuid);
1004 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1005 kvm->arch.model.fac_list[0],
1006 kvm->arch.model.fac_list[1],
1007 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001008 } else
1009 ret = -EFAULT;
1010 kfree(proc);
1011out:
1012 mutex_unlock(&kvm->lock);
1013 return ret;
1014}
1015
David Hildenbrand15c97052015-03-19 17:36:43 +01001016static int kvm_s390_set_processor_feat(struct kvm *kvm,
1017 struct kvm_device_attr *attr)
1018{
1019 struct kvm_s390_vm_cpu_feat data;
1020 int ret = -EBUSY;
1021
1022 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1023 return -EFAULT;
1024 if (!bitmap_subset((unsigned long *) data.feat,
1025 kvm_s390_available_cpu_feat,
1026 KVM_S390_VM_CPU_FEAT_NR_BITS))
1027 return -EINVAL;
1028
1029 mutex_lock(&kvm->lock);
1030 if (!atomic_read(&kvm->online_vcpus)) {
1031 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1032 KVM_S390_VM_CPU_FEAT_NR_BITS);
1033 ret = 0;
1034 }
1035 mutex_unlock(&kvm->lock);
1036 return ret;
1037}
1038
David Hildenbrand0a763c72016-05-18 16:03:47 +02001039static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1040 struct kvm_device_attr *attr)
1041{
1042 /*
1043 * Once supported by kernel + hw, we have to store the subfunctions
1044 * in kvm->arch and remember that user space configured them.
1045 */
1046 return -ENXIO;
1047}
1048
Michael Mueller658b6ed2015-02-02 15:49:35 +01001049static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1050{
1051 int ret = -ENXIO;
1052
1053 switch (attr->attr) {
1054 case KVM_S390_VM_CPU_PROCESSOR:
1055 ret = kvm_s390_set_processor(kvm, attr);
1056 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001057 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1058 ret = kvm_s390_set_processor_feat(kvm, attr);
1059 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001060 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1061 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1062 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001063 }
1064 return ret;
1065}
1066
1067static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1068{
1069 struct kvm_s390_vm_cpu_processor *proc;
1070 int ret = 0;
1071
1072 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1073 if (!proc) {
1074 ret = -ENOMEM;
1075 goto out;
1076 }
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001077 proc->cpuid = kvm->arch.model.cpuid;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001078 proc->ibc = kvm->arch.model.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001079 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1080 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001081 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1082 kvm->arch.model.ibc,
1083 kvm->arch.model.cpuid);
1084 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1085 kvm->arch.model.fac_list[0],
1086 kvm->arch.model.fac_list[1],
1087 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001088 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1089 ret = -EFAULT;
1090 kfree(proc);
1091out:
1092 return ret;
1093}
1094
1095static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1096{
1097 struct kvm_s390_vm_cpu_machine *mach;
1098 int ret = 0;
1099
1100 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
1101 if (!mach) {
1102 ret = -ENOMEM;
1103 goto out;
1104 }
1105 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001106 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001107 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001108 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001109 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001110 sizeof(S390_lowcore.stfle_fac_list));
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001111 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1112 kvm->arch.model.ibc,
1113 kvm->arch.model.cpuid);
1114 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1115 mach->fac_mask[0],
1116 mach->fac_mask[1],
1117 mach->fac_mask[2]);
1118 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1119 mach->fac_list[0],
1120 mach->fac_list[1],
1121 mach->fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001122 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1123 ret = -EFAULT;
1124 kfree(mach);
1125out:
1126 return ret;
1127}
1128
David Hildenbrand15c97052015-03-19 17:36:43 +01001129static int kvm_s390_get_processor_feat(struct kvm *kvm,
1130 struct kvm_device_attr *attr)
1131{
1132 struct kvm_s390_vm_cpu_feat data;
1133
1134 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1135 KVM_S390_VM_CPU_FEAT_NR_BITS);
1136 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1137 return -EFAULT;
1138 return 0;
1139}
1140
1141static int kvm_s390_get_machine_feat(struct kvm *kvm,
1142 struct kvm_device_attr *attr)
1143{
1144 struct kvm_s390_vm_cpu_feat data;
1145
1146 bitmap_copy((unsigned long *) data.feat,
1147 kvm_s390_available_cpu_feat,
1148 KVM_S390_VM_CPU_FEAT_NR_BITS);
1149 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1150 return -EFAULT;
1151 return 0;
1152}
1153
David Hildenbrand0a763c72016-05-18 16:03:47 +02001154static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1155 struct kvm_device_attr *attr)
1156{
1157 /*
1158 * Once we can actually configure subfunctions (kernel + hw support),
1159 * we have to check if they were already set by user space, if so copy
1160 * them from kvm->arch.
1161 */
1162 return -ENXIO;
1163}
1164
1165static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1166 struct kvm_device_attr *attr)
1167{
1168 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1169 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1170 return -EFAULT;
1171 return 0;
1172}
Michael Mueller658b6ed2015-02-02 15:49:35 +01001173static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1174{
1175 int ret = -ENXIO;
1176
1177 switch (attr->attr) {
1178 case KVM_S390_VM_CPU_PROCESSOR:
1179 ret = kvm_s390_get_processor(kvm, attr);
1180 break;
1181 case KVM_S390_VM_CPU_MACHINE:
1182 ret = kvm_s390_get_machine(kvm, attr);
1183 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001184 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1185 ret = kvm_s390_get_processor_feat(kvm, attr);
1186 break;
1187 case KVM_S390_VM_CPU_MACHINE_FEAT:
1188 ret = kvm_s390_get_machine_feat(kvm, attr);
1189 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001190 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1191 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1192 break;
1193 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1194 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1195 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001196 }
1197 return ret;
1198}
1199
Dominik Dingelf2061652014-04-09 13:13:00 +02001200static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1201{
1202 int ret;
1203
1204 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001205 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001206 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001207 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001208 case KVM_S390_VM_TOD:
1209 ret = kvm_s390_set_tod(kvm, attr);
1210 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001211 case KVM_S390_VM_CPU_MODEL:
1212 ret = kvm_s390_set_cpu_model(kvm, attr);
1213 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001214 case KVM_S390_VM_CRYPTO:
1215 ret = kvm_s390_vm_set_crypto(kvm, attr);
1216 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001217 case KVM_S390_VM_MIGRATION:
1218 ret = kvm_s390_vm_set_migration(kvm, attr);
1219 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001220 default:
1221 ret = -ENXIO;
1222 break;
1223 }
1224
1225 return ret;
1226}
1227
1228static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1229{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001230 int ret;
1231
1232 switch (attr->group) {
1233 case KVM_S390_VM_MEM_CTRL:
1234 ret = kvm_s390_get_mem_control(kvm, attr);
1235 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001236 case KVM_S390_VM_TOD:
1237 ret = kvm_s390_get_tod(kvm, attr);
1238 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001239 case KVM_S390_VM_CPU_MODEL:
1240 ret = kvm_s390_get_cpu_model(kvm, attr);
1241 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001242 case KVM_S390_VM_MIGRATION:
1243 ret = kvm_s390_vm_get_migration(kvm, attr);
1244 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001245 default:
1246 ret = -ENXIO;
1247 break;
1248 }
1249
1250 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +02001251}
1252
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

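/*
 * Read the guest's storage keys into a userspace buffer. Returns
 * KVM_S390_GET_SKEYS_NONE if the guest is not using storage keys at all,
 * so userspace (e.g. migration code) can skip the transfer entirely.
 */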
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

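/*
 * Write a userspace-supplied set of storage keys into the guest.
 * Storage key handling is enabled lazily here on first use, via
 * s390_enable_skey().
 */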
static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int srcu_idx, i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	down_read(&current->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

/*
 * Base address and length must be sent at the start of each block, therefore
 * it's cheaper to send some clean data, as long as it's less than the size of
 * two longs.
 */
#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)

/*
 * This function searches for the next page with dirty CMMA attributes, and
 * saves the attributes in the buffer up to either the end of the buffer or
 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
 * no trailing clean bytes are saved.
 * In case no dirty bits were found, or if CMMA was not enabled or used, the
 * output buffer will indicate 0 as length.
 */
static int kvm_s390_get_cmma_bits(struct kvm *kvm,
				  struct kvm_s390_cmma_log *args)
{
	struct kvm_s390_migration_state *s = kvm->arch.migration_state;
	unsigned long bufsize, hva, pgstev, i, next, cur;
	int srcu_idx, peek, r = 0, rr;
	u8 *res;

	cur = args->start_gfn;
	i = next = pgstev = 0;

	if (unlikely(!kvm->arch.use_cmma))
		return -ENXIO;
	/* Invalid/unsupported flags were specified */
	if (args->flags & ~KVM_S390_CMMA_PEEK)
		return -EINVAL;
	/* Migration mode query, and we are not doing a migration */
	peek = !!(args->flags & KVM_S390_CMMA_PEEK);
	if (!peek && !s)
		return -EINVAL;
	/* CMMA is disabled or was not used, or the buffer has length zero */
	bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
	if (!bufsize || !kvm->mm->context.use_cmma) {
		memset(args, 0, sizeof(*args));
		return 0;
	}

	if (!peek) {
		/* We are not peeking, and there are no dirty pages */
		if (!atomic64_read(&s->dirty_pages)) {
			memset(args, 0, sizeof(*args));
			return 0;
		}
		cur = find_next_bit(s->pgste_bitmap, s->bitmap_size,
				    args->start_gfn);
		if (cur >= s->bitmap_size)	/* nothing found, loop back */
			cur = find_next_bit(s->pgste_bitmap, s->bitmap_size, 0);
		if (cur >= s->bitmap_size) {	/* again! (very unlikely) */
			memset(args, 0, sizeof(*args));
			return 0;
		}
		next = find_next_bit(s->pgste_bitmap, s->bitmap_size, cur + 1);
	}

	res = vmalloc(bufsize);
	if (!res)
		return -ENOMEM;

	args->start_gfn = cur;

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	while (i < bufsize) {
		hva = gfn_to_hva(kvm, cur);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}
		/* decrement only if we actually flipped the bit to 0 */
		if (!peek && test_and_clear_bit(cur, s->pgste_bitmap))
			atomic64_dec(&s->dirty_pages);
		r = get_pgste(kvm->mm, hva, &pgstev);
		if (r < 0)
			pgstev = 0;
		/* save the value */
		res[i++] = (pgstev >> 24) & 0x3;
		/*
		 * if the next bit is too far away, stop.
		 * if we reached the previous "next", find the next one
		 */
		if (!peek) {
			if (next > cur + KVM_S390_MAX_BIT_DISTANCE)
				break;
			if (cur == next)
				next = find_next_bit(s->pgste_bitmap,
						     s->bitmap_size, cur + 1);
			/* reached the end of the bitmap or of the buffer, stop */
			if ((next >= s->bitmap_size) ||
			    (next >= args->start_gfn + bufsize))
				break;
		}
		cur++;
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);
	args->count = i;
	args->remaining = s ? atomic64_read(&s->dirty_pages) : 0;

	rr = copy_to_user((void __user *)args->values, res, args->count);
	if (rr)
		r = -EFAULT;

	vfree(res);
	return r;
}

/*
 * This function sets the CMMA attributes for the given pages. If the input
 * buffer has zero length, no action is taken, otherwise the attributes are
 * set and the mm->context.use_cmma flag is set.
 */
static int kvm_s390_set_cmma_bits(struct kvm *kvm,
				  const struct kvm_s390_cmma_log *args)
{
	unsigned long hva, mask, pgstev, i;
	uint8_t *bits;
	int srcu_idx, r = 0;

	mask = args->mask;

	if (!kvm->arch.use_cmma)
		return -ENXIO;
	/* invalid/unsupported flags */
	if (args->flags != 0)
		return -EINVAL;
	/* Enforce sane limit on memory allocation */
	if (args->count > KVM_S390_CMMA_SIZE_MAX)
		return -EINVAL;
	/* Nothing to do */
	if (args->count == 0)
		return 0;

	bits = vmalloc(sizeof(*bits) * args->count);
	if (!bits)
		return -ENOMEM;

	r = copy_from_user(bits, (void __user *)args->values, args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	down_read(&kvm->mm->mmap_sem);
	srcu_idx = srcu_read_lock(&kvm->srcu);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		pgstev = bits[i];
		pgstev = pgstev << 24;
		mask &= _PGSTE_GPS_USAGE_MASK;
		set_pgste_bits(kvm->mm, hva, mask, pgstev);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	up_read(&kvm->mm->mmap_sem);

	if (!kvm->mm->context.use_cmma) {
		down_write(&kvm->mm->mmap_sem);
		kvm->mm->context.use_cmma = 1;
		up_write(&kvm->mm->mmap_sem);
	}
out:
	vfree(bits);
	return r;
}

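/*
 * Dispatcher for ioctls on the VM file descriptor. A userspace caller
 * reaches the attribute handlers above roughly like this (illustrative
 * sketch only, not code from this file):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MIGRATION,
 *		.attr  = KVM_S390_VM_MIGRATION_START,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */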
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	case KVM_S390_GET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		r = kvm_s390_get_cmma_bits(kvm, &args);
		if (!r) {
			r = copy_to_user(argp, &args, sizeof(args));
			if (r)
				r = -EFAULT;
		}
		break;
	}
	case KVM_S390_SET_CMMA_BITS: {
		struct kvm_s390_cmma_log args;

		r = -EFAULT;
		if (copy_from_user(&args, argp, sizeof(args)))
			break;
		r = kvm_s390_set_cmma_bits(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

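/*
 * Query the AP (Adjunct Processor) crypto configuration with the PQAP(QCI)
 * instruction (opcode 0xb2af, with the QCI function code in register 0).
 * The result lands in the 128-byte config buffer; the returned condition
 * code is non-zero if the query failed.
 */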
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

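/*
 * The CRYCB format depends on whether the APXA facility (extended
 * addressing for crypto adapters, as queried above) is installed:
 * format 2 with APXA, format 1 without.
 */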
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

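/*
 * Create the architecture-specific parts of a VM: the SCA (system control
 * area), debug facility, facility mask and list, crypto setup, floating
 * interrupt state and, unless this is a ucontrol VM, the guest address
 * space (gmap).
 */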
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	mutex_init(&kvm->arch.float_int.ais_lock);
	kvm->arch.float_int.simm = 0;
	kvm->arch.float_int.nimm = 0;
	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_SIZE_MAX;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	if (kvm->arch.migration_state) {
		vfree(kvm->arch.migration_state->pgste_bitmap);
		kfree(kvm->arch.migration_state);
	}
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

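/*
 * SCA (system control area) bookkeeping: each vcpu owns one entry that
 * the SIE instruction uses, e.g. for SIGP interpretation. The basic SCA
 * has KVM_S390_BSCA_CPU_SLOTS entries, the extended SCA (ESCA)
 * KVM_S390_ESCA_CPU_SLOTS; see sca_can_add_vcpu() below.
 */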
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries())
		return;
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	if (!kvm_s390_use_sca_entries()) {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		/* we still need the basic sca for the ipte control */
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		/* must not fall through and touch SCA entries we don't use */
		return;
	}
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

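/*
 * Convert a VM from the basic to the extended SCA. All vcpus are blocked
 * while the SCA pointers in their SIE control blocks are rewritten, so no
 * vcpu can enter SIE with a stale pointer.
 */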
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (!kvm_s390_use_sca_entries()) {
		if (id < KVM_MAX_VCPUS)
			return true;
		return false;
	}
	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

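/*
 * First-stage vcpu init: announce which register sets userspace may
 * synchronize through kvm_run (kvm_valid_regs), depending on the
 * facilities available to this VM.
 */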
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	kvm_s390_set_prefix(vcpu, 0);
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	if (test_kvm_facility(vcpu->kvm, 133))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	gmap_enable(vcpu->arch.enabled_gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

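/*
 * Called once the vcpu fd exists: inherit the VM-wide TOD epoch (under
 * preemption protection, as the epoch is relative to the TOD clock),
 * hook the vcpu into the SCA and preselect the gmap that the first
 * vcpu_load will enable.
 */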
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= ECB_SRSI;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= ECB_TE;

	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
		vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
	if (test_kvm_facility(vcpu->kvm, 130))
		vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
	vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= ECA_CEI;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= ECA_IB;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= ECA_SII;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= ECA_SIGPI;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= ECA_VX;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
	}
	vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
					| SDNXC;
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;

	if (sclp.has_kss)
		atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
	else
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

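/*
 * Allocate and wire up a new vcpu. The SIE control block and its
 * satellite blocks must fit into a single page, which the BUILD_BUG_ON
 * on struct sie_page enforces.
 */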
2394struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2395 unsigned int id)
2396{
Carsten Otte4d475552011-10-18 12:27:12 +02002397 struct kvm_vcpu *vcpu;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002398 struct sie_page *sie_page;
Carsten Otte4d475552011-10-18 12:27:12 +02002399 int rc = -EINVAL;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002400
David Hildenbrand42158252015-10-12 12:57:22 +02002401 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
Carsten Otte4d475552011-10-18 12:27:12 +02002402 goto out;
2403
2404 rc = -ENOMEM;
2405
Michael Muellerb110fea2013-06-12 13:54:54 +02002406 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002407 if (!vcpu)
Carsten Otte4d475552011-10-18 12:27:12 +02002408 goto out;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002409
QingFeng Haoda72ca42017-06-07 11:41:19 +02002410 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002411 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
2412 if (!sie_page)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002413 goto out_free_cpu;
2414
Michael Mueller7feb6bb2013-06-28 13:30:24 +02002415 vcpu->arch.sie_block = &sie_page->sie_block;
2416 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2417
David Hildenbrandefed1102015-04-16 12:32:41 +02002418 /* the real guest size will always be smaller than msl */
2419 vcpu->arch.sie_block->mso = 0;
2420 vcpu->arch.sie_block->msl = sclp.hamax;
2421
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002422 vcpu->arch.sie_block->icpua = id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01002423 spin_lock_init(&vcpu->arch.local_int.lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002424 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
Christian Borntraegerd0321a22013-06-12 13:54:55 +02002425 vcpu->arch.local_int.wq = &vcpu->wq;
Christian Borntraeger5288fbf2008-03-25 18:47:31 +01002426 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002427 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002428
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002429 rc = kvm_vcpu_init(vcpu, kvm, id);
2430 if (rc)
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002431 goto out_free_sie_block;
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002432 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002433 vcpu->arch.sie_block);
Cornelia Huckade38c32012-07-23 17:20:30 +02002434 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002435
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002436 return vcpu;
Wei Yongjun7b06bf22010-03-09 14:37:53 +08002437out_free_sie_block:
2438 free_page((unsigned long)(vcpu->arch.sie_block));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002439out_free_cpu:
Michael Muellerb110fea2013-06-12 13:54:54 +02002440 kmem_cache_free(kvm_vcpu_cache, vcpu);
Carsten Otte4d475552011-10-18 12:27:12 +02002441out:
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002442 return ERR_PTR(rc);
2443}
2444
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002445int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
2446{
David Hildenbrand9a022062014-08-05 17:40:47 +02002447 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002448}
2449
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002450bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
2451{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08002452 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08002453}
2454
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002455void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002456{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002457 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002458 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002459}
2460
Christian Borntraeger27406cd2015-04-14 12:17:34 +02002461void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002462{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002463 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002464}
2465
Christian Borntraeger8e236542015-04-09 13:49:04 +02002466static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
2467{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002468 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02002469 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002470}
2471
2472static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
2473{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04002474 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002475}
2476
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002477/*
2478 * Kick a guest cpu out of SIE and wait until SIE is not running.
2479 * If the CPU is not running (e.g. waiting as idle) the function will
2480 * return immediately. */
2481void exit_sie(struct kvm_vcpu *vcpu)
2482{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002483 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002484 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
2485 cpu_relax();
2486}
2487
Christian Borntraeger8e236542015-04-09 13:49:04 +02002488/* Kick a guest cpu out of SIE to process a request synchronously */
2489void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002490{
Christian Borntraeger8e236542015-04-09 13:49:04 +02002491 kvm_make_request(req, vcpu);
2492 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02002493}
2494
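/*
 * gmap notifier callback: if a host mapping change hits a vcpu's
 * prefix pages, kick that vcpu out of SIE and request a prefix
 * re-map (KVM_REQ_MMU_RELOAD). Shadow gmaps and addresses beyond
 * the 31-bit prefix range are ignored.
 */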
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002495static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2496 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002497{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002498 struct kvm *kvm = gmap->private;
2499 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002500 unsigned long prefix;
2501 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002502
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02002503 if (gmap_is_shadow(gmap))
2504 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002505 if (start >= 1UL << 31)
2506 /* We are only interested in prefix pages */
2507 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002508 kvm_for_each_vcpu(i, vcpu, kvm) {
2509 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01002510 prefix = kvm_s390_get_prefix(vcpu);
2511 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2512 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2513 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02002514 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002515 }
2516 }
2517}
2518
Christoffer Dallb6d33832012-03-08 16:44:24 -05002519int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2520{
2521 /* kvm common code refers to this, but never calls it */
2522 BUG();
2523 return 0;
2524}
2525
Carsten Otte14eebd92012-05-15 14:15:26 +02002526static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2527 struct kvm_one_reg *reg)
2528{
2529 int r = -EINVAL;
2530
2531 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002532 case KVM_REG_S390_TODPR:
2533 r = put_user(vcpu->arch.sie_block->todpr,
2534 (u32 __user *)reg->addr);
2535 break;
2536 case KVM_REG_S390_EPOCHDIFF:
2537 r = put_user(vcpu->arch.sie_block->epoch,
2538 (u64 __user *)reg->addr);
2539 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002540 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002541 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02002542 (u64 __user *)reg->addr);
2543 break;
2544 case KVM_REG_S390_CLOCK_COMP:
2545 r = put_user(vcpu->arch.sie_block->ckc,
2546 (u64 __user *)reg->addr);
2547 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002548 case KVM_REG_S390_PFTOKEN:
2549 r = put_user(vcpu->arch.pfault_token,
2550 (u64 __user *)reg->addr);
2551 break;
2552 case KVM_REG_S390_PFCOMPARE:
2553 r = put_user(vcpu->arch.pfault_compare,
2554 (u64 __user *)reg->addr);
2555 break;
2556 case KVM_REG_S390_PFSELECT:
2557 r = put_user(vcpu->arch.pfault_select,
2558 (u64 __user *)reg->addr);
2559 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002560 case KVM_REG_S390_PP:
2561 r = put_user(vcpu->arch.sie_block->pp,
2562 (u64 __user *)reg->addr);
2563 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002564 case KVM_REG_S390_GBEA:
2565 r = put_user(vcpu->arch.sie_block->gbea,
2566 (u64 __user *)reg->addr);
2567 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002568 default:
2569 break;
2570 }
2571
2572 return r;
2573}
2574
2575static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2576 struct kvm_one_reg *reg)
2577{
2578 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01002579 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02002580
2581 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02002582 case KVM_REG_S390_TODPR:
2583 r = get_user(vcpu->arch.sie_block->todpr,
2584 (u32 __user *)reg->addr);
2585 break;
2586 case KVM_REG_S390_EPOCHDIFF:
2587 r = get_user(vcpu->arch.sie_block->epoch,
2588 (u64 __user *)reg->addr);
2589 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02002590 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01002591 r = get_user(val, (u64 __user *)reg->addr);
2592 if (!r)
2593 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02002594 break;
2595 case KVM_REG_S390_CLOCK_COMP:
2596 r = get_user(vcpu->arch.sie_block->ckc,
2597 (u64 __user *)reg->addr);
2598 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02002599 case KVM_REG_S390_PFTOKEN:
2600 r = get_user(vcpu->arch.pfault_token,
2601 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02002602 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2603 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02002604 break;
2605 case KVM_REG_S390_PFCOMPARE:
2606 r = get_user(vcpu->arch.pfault_compare,
2607 (u64 __user *)reg->addr);
2608 break;
2609 case KVM_REG_S390_PFSELECT:
2610 r = get_user(vcpu->arch.pfault_select,
2611 (u64 __user *)reg->addr);
2612 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01002613 case KVM_REG_S390_PP:
2614 r = get_user(vcpu->arch.sie_block->pp,
2615 (u64 __user *)reg->addr);
2616 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01002617 case KVM_REG_S390_GBEA:
2618 r = get_user(vcpu->arch.sie_block->gbea,
2619 (u64 __user *)reg->addr);
2620 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02002621 default:
2622 break;
2623 }
2624
2625 return r;
2626}
Christoffer Dallb6d33832012-03-08 16:44:24 -05002627
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002628static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2629{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002630 kvm_s390_vcpu_initial_reset(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002631 return 0;
2632}
2633
2634int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2635{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002636 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002637 return 0;
2638}
2639
2640int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2641{
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01002642 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002643 return 0;
2644}
2645
2646int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2647 struct kvm_sregs *sregs)
2648{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002649 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002650 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002651 return 0;
2652}
2653
2654int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2655 struct kvm_sregs *sregs)
2656{
Christian Borntraeger59674c12012-01-11 11:20:33 +01002657 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002658 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002659 return 0;
2660}
2661
2662int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2663{
Martin Schwidefsky4725c862013-10-15 16:08:34 +02002664 if (test_fp_ctl(fpu->fpc))
2665 return -EINVAL;
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002666 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002667 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002668 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
2669 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002670 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002671 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002672 return 0;
2673}
2674
2675int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2676{
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002677 /* make sure we have the latest values */
2678 save_fpu_regs();
2679 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002680 convert_vx_to_fp((freg_t *) fpu->fprs,
2681 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01002682 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02002683 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01002684 fpu->fpc = vcpu->run->s.regs.fpc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002685 return 0;
2686}
2687
2688static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2689{
2690 int rc = 0;
2691
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02002692 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002693 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01002694 else {
2695 vcpu->run->psw_mask = psw.mask;
2696 vcpu->run->psw_addr = psw.addr;
2697 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002698 return rc;
2699}
2700
2701int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2702 struct kvm_translation *tr)
2703{
2704 return -EINVAL; /* not implemented yet */
2705}
2706
David Hildenbrand27291e22014-01-23 12:26:52 +01002707#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2708 KVM_GUESTDBG_USE_HW_BP | \
2709 KVM_GUESTDBG_ENABLE)
2710
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002711int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2712 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002713{
David Hildenbrand27291e22014-01-23 12:26:52 +01002714 int rc = 0;
2715
2716 vcpu->guest_debug = 0;
2717 kvm_s390_clear_bp_data(vcpu);
2718
David Hildenbrand2de3bfc2014-05-20 17:25:20 +02002719 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
David Hildenbrand27291e22014-01-23 12:26:52 +01002720 return -EINVAL;
David Hildenbrand89b5b4d2015-11-24 13:47:13 +01002721 if (!sclp.has_gpere)
2722 return -EINVAL;
David Hildenbrand27291e22014-01-23 12:26:52 +01002723
2724 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2725 vcpu->guest_debug = dbg->control;
2726 /* enforce guest PER */
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002727 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002728
2729 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2730 rc = kvm_s390_import_bp_data(vcpu, dbg);
2731 } else {
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002732 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002733 vcpu->arch.guestdbg.last_bp = 0;
2734 }
2735
2736 if (rc) {
2737 vcpu->guest_debug = 0;
2738 kvm_s390_clear_bp_data(vcpu);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002739 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand27291e22014-01-23 12:26:52 +01002740 }
2741
2742 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002743}
2744
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002745int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2746 struct kvm_mp_state *mp_state)
2747{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002748 /* CHECK_STOP and LOAD are not supported yet */
2749 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2750 KVM_MP_STATE_OPERATING;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002751}
2752
2753int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2754 struct kvm_mp_state *mp_state)
2755{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02002756 int rc = 0;
2757
2758 /* user space knows about this interface - let it control the state */
2759 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2760
2761 switch (mp_state->mp_state) {
2762 case KVM_MP_STATE_STOPPED:
2763 kvm_s390_vcpu_stop(vcpu);
2764 break;
2765 case KVM_MP_STATE_OPERATING:
2766 kvm_s390_vcpu_start(vcpu);
2767 break;
2768 case KVM_MP_STATE_LOAD:
2769 case KVM_MP_STATE_CHECK_STOP:
2770 /* fall through - CHECK_STOP and LOAD are not supported yet */
2771 default:
2772 rc = -ENXIO;
2773 }
2774
2775 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03002776}
2777
David Hildenbrand8ad35752014-03-14 11:00:21 +01002778static bool ibs_enabled(struct kvm_vcpu *vcpu)
2779{
2780 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2781}
2782
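/* Process all requests pending for this vcpu before the next SIE entry. */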
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002783static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2784{
David Hildenbrand8ad35752014-03-14 11:00:21 +01002785retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02002786 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02002787 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02002788 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002789 /*
2790 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002791 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002792 * This ensures that the ipte instruction for this request has
2793 * already finished. We might race against a second unmapper that
2794 * wants to set the blocking bit. Let's just retry the request loop.
2795 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01002796 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002797 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01002798 rc = gmap_mprotect_notify(vcpu->arch.gmap,
2799 kvm_s390_get_prefix(vcpu),
2800 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02002801 if (rc) {
2802 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002803 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02002804 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002805 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002806 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01002807
David Hildenbrandd3d692c2014-07-29 08:53:36 +02002808 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2809 vcpu->arch.sie_block->ihcpu = 0xffff;
2810 goto retry;
2811 }
2812
David Hildenbrand8ad35752014-03-14 11:00:21 +01002813 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2814 if (!ibs_enabled(vcpu)) {
2815 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002816 atomic_or(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002817 &vcpu->arch.sie_block->cpuflags);
2818 }
2819 goto retry;
2820 }
2821
2822 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2823 if (ibs_enabled(vcpu)) {
2824 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
Peter Zijlstra805de8f42015-04-24 01:12:32 +02002825 atomic_andnot(CPUSTAT_IBS,
David Hildenbrand8ad35752014-03-14 11:00:21 +01002826 &vcpu->arch.sie_block->cpuflags);
2827 }
2828 goto retry;
2829 }
2830
David Hildenbrand6502a342016-06-21 14:19:51 +02002831 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
2832 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
2833 goto retry;
2834 }
2835
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02002836 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
2837 /*
2838 * Disable CMMA virtualization; we will emulate the ESSA
2839 * instruction manually, in order to provide additional
2840 * functionalities needed for live migration.
2841 */
2842 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
2843 goto retry;
2844 }
2845
2846 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
2847 /*
2848 * Re-enable CMMA virtualization if CMMA is available and
2849 * was used.
2850 */
2851 if ((vcpu->kvm->arch.use_cmma) &&
2852 (vcpu->kvm->mm->context.use_cmma))
2853 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
2854 goto retry;
2855 }
2856
David Hildenbrand0759d062014-05-13 16:54:32 +02002857 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02002858 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02002859
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002860 return 0;
2861}
2862
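/*
 * Set the VM-wide TOD epoch: compute the new epoch difference and
 * propagate it to all vcpus while they are blocked from entering SIE.
 */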
David Hildenbrand25ed1672015-05-12 09:49:14 +02002863void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2864{
2865 struct kvm_vcpu *vcpu;
2866 int i;
2867
2868 mutex_lock(&kvm->lock);
2869 preempt_disable();
2870 kvm->arch.epoch = tod - get_tod_clock();
2871 kvm_s390_vcpu_block_all(kvm);
2872 kvm_for_each_vcpu(i, vcpu, kvm)
2873 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2874 kvm_s390_vcpu_unblock_all(kvm);
2875 preempt_enable();
2876 mutex_unlock(&kvm->lock);
2877}
2878
Thomas Huthfa576c52014-05-06 17:20:16 +02002879/**
2880 * kvm_arch_fault_in_page - fault-in guest page if necessary
2881 * @vcpu: The corresponding virtual cpu
2882 * @gpa: Guest physical address
2883 * @writable: Whether the page should be writable or not
2884 *
2885 * Make sure that a guest page has been faulted-in on the host.
2886 *
2887 * Return: Zero on success, negative error code otherwise.
2888 */
2889long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002890{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02002891 return gmap_fault(vcpu->arch.gmap, gpa,
2892 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002893}
2894
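/*
 * Inject a pfault token into the guest: PFAULT_INIT as a per-vcpu
 * interrupt when async handling of a fault starts, PFAULT_DONE as a
 * floating interrupt once the page has been made available.
 */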
Dominik Dingel3c038e62013-10-07 17:11:48 +02002895static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2896 unsigned long token)
2897{
2898 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02002899 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002900
2901 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02002902 irq.u.ext.ext_params2 = token;
2903 irq.type = KVM_S390_INT_PFAULT_INIT;
2904 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02002905 } else {
2906 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02002907 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02002908 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2909 }
2910}
2911
2912void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2913 struct kvm_async_pf *work)
2914{
2915 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2916 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2917}
2918
2919void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2920 struct kvm_async_pf *work)
2921{
2922 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2923 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2924}
2925
2926void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2927 struct kvm_async_pf *work)
2928{
2929 /* s390 will always inject the page directly */
2930}
2931
2932bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2933{
2934 /*
2935 * s390 will always inject the page directly,
2936 * but we still want check_async_completion to clean up
2937 */
2938 return true;
2939}
2940
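/*
 * Try to handle the current gmap fault asynchronously. This is only
 * done if the guest has pfault set up (token/compare/select programmed)
 * and can take the completion interrupt right now.
 */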
2941static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2942{
2943 hva_t hva;
2944 struct kvm_arch_async_pf arch;
2945 int rc;
2946
2947 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2948 return 0;
2949 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2950 vcpu->arch.pfault_compare)
2951 return 0;
2952 if (psw_extint_disabled(vcpu))
2953 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02002954 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002955 return 0;
2956 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2957 return 0;
2958 if (!vcpu->arch.gmap->pfault_enabled)
2959 return 0;
2960
Heiko Carstens81480cc2014-01-01 16:36:07 +01002961 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2962 hva += current->thread.gmap_addr & ~PAGE_MASK;
2963 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02002964 return 0;
2965
2966 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2967 return rc;
2968}
2969
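/*
 * Work done before each SIE entry: async pfault housekeeping, pending
 * machine checks, interrupt delivery, request handling and guest debug
 * setup.
 */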
Thomas Huth3fb4c402013-09-12 10:33:43 +02002970static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002971{
Thomas Huth3fb4c402013-09-12 10:33:43 +02002972 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01002973
Dominik Dingel3c038e62013-10-07 17:11:48 +02002974 /*
2975 * On s390, notifications for arriving pages are delivered directly
2976 * to the guest, but the housekeeping for completed pfaults is
2977 * handled outside the worker.
2978 */
2979 kvm_check_async_pf_completion(vcpu);
2980
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01002981 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2982 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002983
2984 if (need_resched())
2985 schedule();
2986
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02002987 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02002988 s390_handle_mcck();
2989
Jens Freimann79395032014-04-17 10:10:30 +02002990 if (!kvm_is_ucontrol(vcpu->kvm)) {
2991 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2992 if (rc)
2993 return rc;
2994 }
Carsten Otte0ff31862008-05-21 13:37:37 +02002995
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002996 rc = kvm_s390_handle_requests(vcpu);
2997 if (rc)
2998 return rc;
2999
David Hildenbrand27291e22014-01-23 12:26:52 +01003000 if (guestdbg_enabled(vcpu)) {
3001 kvm_s390_backup_guest_per_regs(vcpu);
3002 kvm_s390_patch_guest_per_regs(vcpu);
3003 }
3004
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003005 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003006 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3007 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3008 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003009
Thomas Huth3fb4c402013-09-12 10:33:43 +02003010 return 0;
3011}
3012
Thomas Huth492d8642015-02-10 16:11:01 +01003013static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3014{
David Hildenbrand56317922016-01-12 17:37:58 +01003015 struct kvm_s390_pgm_info pgm_info = {
3016 .code = PGM_ADDRESSING,
3017 };
3018 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01003019 int rc;
3020
3021 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3022 trace_kvm_s390_sie_fault(vcpu);
3023
3024 /*
3025 * We want to inject an addressing exception, which is defined as a
3026 * suppressing or terminating exception. However, since we came here
3027 * by a DAT access exception, the PSW still points to the faulting
3028 * instruction since DAT exceptions are nullifying. So we've got
3029 * to look up the current opcode to get the length of the instruction
3030 * to be able to forward the PSW.
3031 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02003032 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01003033 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01003034 if (rc < 0) {
3035 return rc;
3036 } else if (rc) {
3037 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3038 * Forward by arbitrary ilc, injection will take care of
3039 * nullification if necessary.
3040 */
3041 pgm_info = vcpu->arch.pgm;
3042 ilen = 4;
3043 }
David Hildenbrand56317922016-01-12 17:37:58 +01003044 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3045 kvm_s390_forward_psw(vcpu, ilen);
3046 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01003047}
3048
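/*
 * Evaluate a SIE exit: reinject machine checks, dispatch intercept
 * handlers, or turn host faults into guest pfault handling or exits
 * to userspace.
 */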
Thomas Huth3fb4c402013-09-12 10:33:43 +02003049static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3050{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003051 struct mcck_volatile_info *mcck_info;
3052 struct sie_page *sie_page;
3053
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02003054 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3055 vcpu->arch.sie_block->icptcode);
3056 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3057
David Hildenbrand27291e22014-01-23 12:26:52 +01003058 if (guestdbg_enabled(vcpu))
3059 kvm_s390_restore_guest_per_regs(vcpu);
3060
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003061 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3062 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003063
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02003064 if (exit_reason == -EINTR) {
3065 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3066 sie_page = container_of(vcpu->arch.sie_block,
3067 struct sie_page, sie_block);
3068 mcck_info = &sie_page->mcck_info;
3069 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3070 return 0;
3071 }
3072
David Hildenbrand71f116b2015-10-19 16:24:28 +02003073 if (vcpu->arch.sie_block->icptcode > 0) {
3074 int rc = kvm_handle_sie_intercept(vcpu);
3075
3076 if (rc != -EOPNOTSUPP)
3077 return rc;
3078 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3079 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3080 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3081 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3082 return -EREMOTE;
3083 } else if (exit_reason != -EFAULT) {
3084 vcpu->stat.exit_null++;
3085 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02003086 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3087 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3088 vcpu->run->s390_ucontrol.trans_exc_code =
3089 current->thread.gmap_addr;
3090 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003091 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003092 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02003093 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003094 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02003095 if (kvm_arch_setup_async_pf(vcpu))
3096 return 0;
3097 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003098 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02003099 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003100}
3101
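/*
 * The inner run loop: enter SIE repeatedly until a signal, a guest
 * debug event or an exit condition that needs userspace intervention.
 */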
3102static int __vcpu_run(struct kvm_vcpu *vcpu)
3103{
3104 int rc, exit_reason;
3105
Thomas Huth800c1062013-09-12 10:33:45 +02003106 /*
3107 * We try to hold kvm->srcu during most of vcpu_run (except when
3108 * running the guest), so that memslots (and other stuff) are protected
3109 */
3110 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3111
Thomas Hutha76ccff2013-09-12 10:33:44 +02003112 do {
3113 rc = vcpu_pre_run(vcpu);
3114 if (rc)
3115 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02003116
Thomas Huth800c1062013-09-12 10:33:45 +02003117 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02003118 /*
3119 * As PF_VCPU will be used in fault handler, between
3120 * guest_enter and guest_exit should be no uaccess.
3121 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02003122 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003123 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003124 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003125 local_irq_enable();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003126 exit_reason = sie64a(vcpu->arch.sie_block,
3127 vcpu->run->s.regs.gprs);
Christian Borntraeger0097d122015-04-30 13:43:30 +02003128 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003129 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02003130 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02003131 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02003132 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003133
Thomas Hutha76ccff2013-09-12 10:33:44 +02003134 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01003135 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02003136
Thomas Huth800c1062013-09-12 10:33:45 +02003137 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01003138 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003139}
3140
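/*
 * Transfer register state that userspace marked dirty in kvm_run into
 * the SIE block and load the guest fpu/access/guarded-storage context
 * onto the host, saving the userspace context first.
 */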
David Hildenbrandb028ee32014-07-17 10:47:43 +02003141static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3142{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003143 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003144 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003145
3146 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003147 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02003148 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3149 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3150 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3151 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3152 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3153 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003154 /* some control register changes require a tlb flush */
3155 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003156 }
3157 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrand4287f242016-02-15 09:40:12 +01003158 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003159 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3160 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3161 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3162 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3163 }
3164 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3165 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3166 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3167 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003168 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3169 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003170 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003171 /*
3172 * If userspace sets the riccb (e.g. after migration) to a valid state,
3173 * we should enable RI here instead of doing the lazy enablement.
3174 */
3175 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003176 test_kvm_facility(vcpu->kvm, 64) &&
3177 riccb->valid &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003178 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01003179 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003180 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02003181 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003182 /*
3183 * If userspace sets the gscb (e.g. after migration) to non-zero,
3184 * we should enable GS here instead of doing the lazy enablement.
3185 */
3186 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3187 test_kvm_facility(vcpu->kvm, 133) &&
3188 gscb->gssm &&
3189 !vcpu->arch.gs_enabled) {
3190 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3191 vcpu->arch.sie_block->ecb |= ECB_GS;
3192 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3193 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02003194 }
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003195 save_access_regs(vcpu->arch.host_acrs);
3196 restore_access_regs(vcpu->run->s.regs.acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003197 /* save host (userspace) fprs/vrs */
3198 save_fpu_regs();
3199 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3200 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3201 if (MACHINE_HAS_VX)
3202 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3203 else
3204 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3205 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3206 if (test_fp_ctl(current->thread.fpu.fpc))
3207 /* User space provided an invalid FPC, let's clear it */
3208 current->thread.fpu.fpc = 0;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003209 if (MACHINE_HAS_GS) {
3210 preempt_disable();
3211 __ctl_set_bit(2, 4);
3212 if (current->thread.gs_cb) {
3213 vcpu->arch.host_gscb = current->thread.gs_cb;
3214 save_gs_cb(vcpu->arch.host_gscb);
3215 }
3216 if (vcpu->arch.gs_enabled) {
3217 current->thread.gs_cb = (struct gs_cb *)
3218 &vcpu->run->s.regs.gscb;
3219 restore_gs_cb(current->thread.gs_cb);
3220 }
3221 preempt_enable();
3222 }
Fan Zhang80cd8762016-08-15 04:53:22 +02003223
David Hildenbrandb028ee32014-07-17 10:47:43 +02003224 kvm_run->kvm_dirty_regs = 0;
3225}
3226
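/*
 * Counterpart of sync_regs(): copy the guest register state back into
 * kvm_run for userspace and restore the saved host context.
 */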
3227static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3228{
3229 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3230 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3231 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3232 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
David Hildenbrand4287f242016-02-15 09:40:12 +01003233 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003234 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3235 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3236 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3237 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3238 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3239 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3240 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003241 save_access_regs(vcpu->run->s.regs.acrs);
3242 restore_access_regs(vcpu->arch.host_acrs);
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003243 /* Save guest register state */
3244 save_fpu_regs();
3245 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3246 /* Restore will be done lazily at return */
3247 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3248 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003249 if (MACHINE_HAS_GS) {
3250 __ctl_set_bit(2, 4);
3251 if (vcpu->arch.gs_enabled)
3252 save_gs_cb(current->thread.gs_cb);
3253 preempt_disable();
3254 current->thread.gs_cb = vcpu->arch.host_gscb;
3255 restore_gs_cb(vcpu->arch.host_gscb);
3256 preempt_enable();
3257 if (!vcpu->arch.host_gscb)
3258 __ctl_clear_bit(2, 4);
3259 vcpu->arch.host_gscb = NULL;
3260 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003261
David Hildenbrandb028ee32014-07-17 10:47:43 +02003262}
3263
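/*
 * Handle the KVM_RUN ioctl: sync registers from userspace, run the
 * vcpu until an exit that needs userspace, then store registers back.
 */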
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003264int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3265{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003266 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003267 sigset_t sigsaved;
3268
Paolo Bonzini460df4c2017-02-08 11:50:15 +01003269 if (kvm_run->immediate_exit)
3270 return -EINTR;
3271
David Hildenbrand27291e22014-01-23 12:26:52 +01003272 if (guestdbg_exit_pending(vcpu)) {
3273 kvm_s390_prepare_debug_exit(vcpu);
3274 return 0;
3275 }
3276
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003277 if (vcpu->sigset_active)
3278 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
3279
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003280 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3281 kvm_s390_vcpu_start(vcpu);
3282 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003283 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003284 vcpu->vcpu_id);
3285 return -EINVAL;
3286 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003287
David Hildenbrandb028ee32014-07-17 10:47:43 +02003288 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003289 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003290
Heiko Carstensdab4079d2009-06-12 10:26:32 +02003291 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02003292 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02003293
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003294 if (signal_pending(current) && !rc) {
3295 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003296 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02003297 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003298
David Hildenbrand27291e22014-01-23 12:26:52 +01003299 if (guestdbg_exit_pending(vcpu) && !rc) {
3300 kvm_s390_prepare_debug_exit(vcpu);
3301 rc = 0;
3302 }
3303
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003304 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02003305 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01003306 rc = 0;
3307 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003308
David Hildenbranddb0758b2016-02-15 09:42:25 +01003309 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02003310 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003311
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003312 if (vcpu->sigset_active)
3313 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
3314
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003315 vcpu->stat.exit_userspace++;
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02003316 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003317}
3318
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003319/*
3320 * store status at address
3321 * we have two special cases:
3322 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
3323 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
3324 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01003325int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003326{
Carsten Otte092670c2011-07-24 10:48:22 +02003327 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003328 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02003329 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01003330 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003331 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003332
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003333 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01003334 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
3335 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003336 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003337 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01003338 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
3339 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003340 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003341 gpa = px;
3342 } else
3343 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003344
3345 /* manually convert vector registers if necessary */
3346 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01003347 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003348 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
3349 fprs, 128);
3350 } else {
3351 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01003352 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003353 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003354 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003355 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003356 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003357 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003358 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02003359 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003360 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003361 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003362 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003363 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01003364 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003365 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01003366 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01003367 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003368 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003369 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003370 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003371 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02003372 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01003373 &vcpu->arch.sie_block->gcr, 128);
3374 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003375}
3376
Thomas Huthe8798922013-11-06 15:46:33 +01003377int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
3378{
3379 /*
3380 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01003381 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01003382 * them into the save area.
3383 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02003384 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003385 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01003386 save_access_regs(vcpu->run->s.regs.acrs);
3387
3388 return kvm_s390_store_status_unloaded(vcpu, addr);
3389}
3390
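/*
 * Toggle IBS via vcpu requests: a pending request for the opposite
 * state is cancelled first, then the vcpu is kicked out of SIE so the
 * change is processed before the next SIE entry.
 */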
David Hildenbrand8ad35752014-03-14 11:00:21 +01003391static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3392{
3393 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003394 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003395}
3396
3397static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
3398{
3399 unsigned int i;
3400 struct kvm_vcpu *vcpu;
3401
3402 kvm_for_each_vcpu(i, vcpu, kvm) {
3403 __disable_ibs_on_vcpu(vcpu);
3404 }
3405}
3406
3407static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3408{
David Hildenbrand09a400e2016-04-04 15:57:08 +02003409 if (!sclp.has_ibs)
3410 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01003411 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003412 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003413}
3414
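/*
 * Move a vcpu from the STOPPED to the OPERATING state. The IBS
 * facility is only used as long as a single vcpu is running, so it
 * gets enabled or disabled depending on the number of started vcpus.
 */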
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003415void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
3416{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003417 int i, online_vcpus, started_vcpus = 0;
3418
3419 if (!is_vcpu_stopped(vcpu))
3420 return;
3421
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003422 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003423 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003424 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003425 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3426
3427 for (i = 0; i < online_vcpus; i++) {
3428 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
3429 started_vcpus++;
3430 }
3431
3432 if (started_vcpus == 0) {
3433 /* we're the only active VCPU -> speed it up */
3434 __enable_ibs_on_vcpu(vcpu);
3435 } else if (started_vcpus == 1) {
3436 /*
3437 * As we are starting a second VCPU, we have to disable
3438 * the IBS facility on all VCPUs to remove potentially
3439 * outstanding ENABLE requests.
3440 */
3441 __disable_ibs_on_all_vcpus(vcpu->kvm);
3442 }
3443
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003444 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003445 /*
3446 * Another VCPU might have used IBS while we were offline.
3447 * Let's play safe and flush the VCPU at startup.
3448 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003449 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003450 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003451 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003452}
3453
3454void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
3455{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003456 int i, online_vcpus, started_vcpus = 0;
3457 struct kvm_vcpu *started_vcpu = NULL;
3458
3459 if (is_vcpu_stopped(vcpu))
3460 return;
3461
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003462 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003463 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003464 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003465 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3466
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003467 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02003468 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02003469
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003470 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003471 __disable_ibs_on_vcpu(vcpu);
3472
3473 for (i = 0; i < online_vcpus; i++) {
3474 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3475 started_vcpus++;
3476 started_vcpu = vcpu->kvm->vcpus[i];
3477 }
3478 }
3479
3480 if (started_vcpus == 1) {
3481 /*
3482 * As we only have one VCPU left, we want to enable the
3483 * IBS facility for that VCPU to speed it up.
3484 */
3485 __enable_ibs_on_vcpu(started_vcpu);
3486 }
3487
David Hildenbrand433b9ee2014-05-06 16:11:14 +02003488 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003489 return;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01003490}
3491
Cornelia Huckd6712df2012-12-20 15:32:11 +01003492static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3493 struct kvm_enable_cap *cap)
3494{
3495 int r;
3496
3497 if (cap->flags)
3498 return -EINVAL;
3499
3500 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003501 case KVM_CAP_S390_CSS_SUPPORT:
3502 if (!vcpu->kvm->arch.css_support) {
3503 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02003504 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01003505 trace_kvm_s390_enable_css(vcpu->kvm);
3506 }
3507 r = 0;
3508 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01003509 default:
3510 r = -EINVAL;
3511 break;
3512 }
3513 return r;
3514}
3515
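/*
 * Back the KVM_S390_MEM_OP ioctl: read or write guest logical memory
 * through a bounce buffer, or only check accessibility when the
 * CHECK_ONLY flag is set.
 */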
Thomas Huth41408c282015-02-06 15:01:21 +01003516static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
3517 struct kvm_s390_mem_op *mop)
3518{
3519 void __user *uaddr = (void __user *)mop->buf;
3520 void *tmpbuf = NULL;
3521 int r, srcu_idx;
3522 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
3523 | KVM_S390_MEMOP_F_CHECK_ONLY;
3524
3525 if (mop->flags & ~supported_flags)
3526 return -EINVAL;
3527
3528 if (mop->size > MEM_OP_MAX_SIZE)
3529 return -E2BIG;
3530
3531 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
3532 tmpbuf = vmalloc(mop->size);
3533 if (!tmpbuf)
3534 return -ENOMEM;
3535 }
3536
3537 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3538
3539 switch (mop->op) {
3540 case KVM_S390_MEMOP_LOGICAL_READ:
3541 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003542 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3543 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01003544 break;
3545 }
3546 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3547 if (r == 0) {
3548 if (copy_to_user(uaddr, tmpbuf, mop->size))
3549 r = -EFAULT;
3550 }
3551 break;
3552 case KVM_S390_MEMOP_LOGICAL_WRITE:
3553 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01003554 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3555 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01003556 break;
3557 }
3558 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
3559 r = -EFAULT;
3560 break;
3561 }
3562 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3563 break;
3564 default:
3565 r = -EINVAL;
3566 }
3567
3568 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
3569
3570 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
3571 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
3572
3573 vfree(tmpbuf);
3574 return r;
3575}
3576
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003577long kvm_arch_vcpu_ioctl(struct file *filp,
3578 unsigned int ioctl, unsigned long arg)
3579{
3580 struct kvm_vcpu *vcpu = filp->private_data;
3581 void __user *argp = (void __user *)arg;
Thomas Huth800c1062013-09-12 10:33:45 +02003582 int idx;
Avi Kivitybc923cc2010-05-13 12:21:46 +03003583 long r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003584
Avi Kivity93736622010-05-13 12:35:17 +03003585 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01003586 case KVM_S390_IRQ: {
3587 struct kvm_s390_irq s390irq;
3588
3589 r = -EFAULT;
3590 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
3591 break;
3592 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
3593 break;
3594 }
Avi Kivity93736622010-05-13 12:35:17 +03003595 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01003596 struct kvm_s390_interrupt s390int;
Jens Freimann383d0b02014-07-29 15:11:49 +02003597 struct kvm_s390_irq s390irq;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003598
Avi Kivity93736622010-05-13 12:35:17 +03003599 r = -EFAULT;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003600 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Avi Kivity93736622010-05-13 12:35:17 +03003601 break;
Jens Freimann383d0b02014-07-29 15:11:49 +02003602 if (s390int_to_s390irq(&s390int, &s390irq))
3603 return -EINVAL;
3604 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
Avi Kivity93736622010-05-13 12:35:17 +03003605 break;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003606 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003607 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02003608 idx = srcu_read_lock(&vcpu->kvm->srcu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003609 r = kvm_s390_vcpu_store_status(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02003610 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03003611 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003612 case KVM_S390_SET_INITIAL_PSW: {
3613 psw_t psw;
3614
Avi Kivitybc923cc2010-05-13 12:21:46 +03003615 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003616 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03003617 break;
3618 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3619 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003620 }
3621 case KVM_S390_INITIAL_RESET:
Avi Kivitybc923cc2010-05-13 12:21:46 +03003622 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3623 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003624 case KVM_SET_ONE_REG:
3625 case KVM_GET_ONE_REG: {
3626 struct kvm_one_reg reg;
3627 r = -EFAULT;
3628 if (copy_from_user(&reg, argp, sizeof(reg)))
3629 break;
3630 if (ioctl == KVM_SET_ONE_REG)
3631 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3632 else
3633 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3634 break;
3635 }
Carsten Otte27e03932012-01-04 10:25:21 +01003636#ifdef CONFIG_KVM_S390_UCONTROL
3637 case KVM_S390_UCAS_MAP: {
3638 struct kvm_s390_ucas_mapping ucasmap;
3639
3640 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3641 r = -EFAULT;
3642 break;
3643 }
3644
3645 if (!kvm_is_ucontrol(vcpu->kvm)) {
3646 r = -EINVAL;
3647 break;
3648 }
3649
3650 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3651 ucasmap.vcpu_addr, ucasmap.length);
3652 break;
3653 }
3654 case KVM_S390_UCAS_UNMAP: {
3655 struct kvm_s390_ucas_mapping ucasmap;
3656
3657 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3658 r = -EFAULT;
3659 break;
3660 }
3661
3662 if (!kvm_is_ucontrol(vcpu->kvm)) {
3663 r = -EINVAL;
3664 break;
3665 }
3666
3667 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3668 ucasmap.length);
3669 break;
3670 }
3671#endif
Carsten Otteccc79102012-01-04 10:25:26 +01003672 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003673 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01003674 break;
3675 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01003676 case KVM_ENABLE_CAP:
3677 {
3678 struct kvm_enable_cap cap;
3679 r = -EFAULT;
3680 if (copy_from_user(&cap, argp, sizeof(cap)))
3681 break;
3682 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3683 break;
3684 }
Thomas Huth41408c282015-02-06 15:01:21 +01003685 case KVM_S390_MEM_OP: {
3686 struct kvm_s390_mem_op mem_op;
3687
3688 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3689 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3690 else
3691 r = -EFAULT;
3692 break;
3693 }
Jens Freimann816c7662014-11-24 17:13:46 +01003694 case KVM_S390_SET_IRQ_STATE: {
3695 struct kvm_s390_irq_state irq_state;
3696
3697 r = -EFAULT;
3698 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3699 break;
3700 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3701 irq_state.len == 0 ||
3702 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3703 r = -EINVAL;
3704 break;
3705 }
3706 r = kvm_s390_set_irq_state(vcpu,
3707 (void __user *) irq_state.buf,
3708 irq_state.len);
3709 break;
3710 }
3711 case KVM_S390_GET_IRQ_STATE: {
3712 struct kvm_s390_irq_state irq_state;
3713
3714 r = -EFAULT;
3715 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3716 break;
3717 if (irq_state.len == 0) {
3718 r = -EINVAL;
3719 break;
3720 }
3721 r = kvm_s390_get_irq_state(vcpu,
3722 (__u8 __user *) irq_state.buf,
3723 irq_state.len);
3724 break;
3725 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003726 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01003727 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003728 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03003729 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003730}
3731
Carsten Otte5b1c1492012-01-04 10:25:23 +01003732int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3733{
3734#ifdef CONFIG_KVM_S390_UCONTROL
3735 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3736 && (kvm_is_ucontrol(vcpu->kvm))) {
3737 vmf->page = virt_to_page(vcpu->arch.sie_block);
3738 get_page(vmf->page);
3739 return 0;
3740 }
3741#endif
3742 return VM_FAULT_SIGBUS;
3743}
3744
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05303745int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3746 unsigned long npages)
Takuya Yoshikawadb3fe4e2012-02-08 13:02:18 +09003747{
3748 return 0;
3749}
3750
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003751/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003752int kvm_arch_prepare_memory_region(struct kvm *kvm,
3753 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003754 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09003755 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003756{
Nick Wangdd2887e2013-03-25 17:22:57 +01003757	/* A few sanity checks. Memory slots have to start and end at a
3758	   segment boundary (1MB). The memory in userland may be fragmented
3759	   into various different vmas. It is okay to mmap() and munmap()
3760	   in this slot at any time after this call. */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003761
Carsten Otte598841c2011-07-24 10:48:21 +02003762 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003763 return -EINVAL;
3764
Carsten Otte598841c2011-07-24 10:48:21 +02003765 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003766 return -EINVAL;
3767
Dominik Dingela3a92c32014-12-01 17:24:42 +01003768 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3769 return -EINVAL;
3770
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003771 return 0;
3772}
3773
3774void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02003775 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003776 const struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02003777 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09003778 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003779{
Carsten Ottef7850c92011-07-24 10:48:23 +02003780 int rc;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02003781
Christian Borntraeger2cef4de2013-03-25 17:22:48 +01003782 /* If the basics of the memslot do not change, we do not want
3783 * to update the gmap. Every update causes several unnecessary
3784 * segment translation exceptions. This is usually handled just
3785 * fine by the normal fault handler + gmap, but it will also
3786 * cause faults on the prefix page of running guest CPUs.
3787 */
3788 if (old->userspace_addr == mem->userspace_addr &&
3789 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3790 old->npages * PAGE_SIZE == mem->memory_size)
3791 return;
Carsten Otte598841c2011-07-24 10:48:21 +02003792
3793 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3794 mem->guest_phys_addr, mem->memory_size);
3795 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02003796 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02003797 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003798}
3799
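/* Build the facility-list mask for word i from the sclp hmfai field. */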
Alexander Yarygin60a37702016-04-01 15:38:57 +03003800static inline unsigned long nonhyp_mask(int i)
3801{
3802 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3803
3804 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3805}
3806
Christian Borntraeger3491caf2016-05-13 12:16:35 +02003807void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3808{
3809 vcpu->valid_wakeup = false;
3810}
3811
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003812static int __init kvm_s390_init(void)
3813{
Alexander Yarygin60a37702016-04-01 15:38:57 +03003814 int i;
3815
David Hildenbrand07197fd2015-01-30 16:01:38 +01003816 if (!sclp.has_sief2) {
3817 pr_info("SIE not available\n");
3818 return -ENODEV;
3819 }
3820
Alexander Yarygin60a37702016-04-01 15:38:57 +03003821 for (i = 0; i < 16; i++)
3822 kvm_s390_fac_list_mask[i] |=
3823 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3824
Michael Mueller9d8d5782015-02-02 15:42:51 +01003825 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003826}
3827
3828static void __exit kvm_s390_exit(void)
3829{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003830 kvm_exit();
3831}
3832
3833module_init(kvm_s390_init);
3834module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02003835
3836/*
3837 * Enable autoloading of the kvm module.
3838 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3839 * since x86 takes a different approach.
3840 */
3841#include <linux/miscdevice.h>
3842MODULE_ALIAS_MISCDEV(KVM_MINOR);
3843MODULE_ALIAS("devname:kvm");