blob: f6268dfb83623d0945357c4de404ee635a6fe1e6 [file] [log] [blame]
Greg Kroah-Hartmand809aa22017-11-24 15:00:33 +01001// SPDX-License-Identifier: GPL-2.0
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002/*
Christian Borntraegerbb64da92017-11-21 16:02:52 +01003 * hosting IBM Z kernel virtual machines (s390x)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004 *
Janosch Frank3e6c5562019-10-02 04:46:58 -04005 * Copyright IBM Corp. 2008, 2020
Heiko Carstensb0c632d2008-03-25 18:47:20 +01006 *
7 * Author(s): Carsten Otte <cotte@de.ibm.com>
8 * Christian Borntraeger <borntraeger@de.ibm.com>
9 * Heiko Carstens <heiko.carstens@de.ibm.com>
Christian Ehrhardt628eb9b2009-05-25 13:40:51 +020010 * Christian Ehrhardt <ehrhardt@de.ibm.com>
Jason J. Herne15f36eb2012-08-02 10:10:17 -040011 * Jason J. Herne <jjherne@us.ibm.com>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010012 */
13
Michael Mueller7aedd9d2018-12-03 10:20:22 +010014#define KMSG_COMPONENT "kvm-s390"
15#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16
Heiko Carstensb0c632d2008-03-25 18:47:20 +010017#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
Christian Borntraegerca872302009-05-12 17:21:49 +020020#include <linux/hrtimer.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010021#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +010024#include <linux/mman.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010025#include <linux/module.h>
Paul Gortmakerd3217962017-02-09 15:20:25 -050026#include <linux/moduleparam.h>
Tony Krowiaka374e892014-09-03 10:13:53 +020027#include <linux/random.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010028#include <linux/slab.h>
Carsten Otteba5c1e92008-03-25 18:47:26 +010029#include <linux/timer.h>
Thomas Huth41408c282015-02-06 15:01:21 +010030#include <linux/vmalloc.h>
David Hildenbrand15c97052015-03-19 17:36:43 +010031#include <linux/bitmap.h>
Ingo Molnar174cd4b2017-02-02 19:15:33 +010032#include <linux/sched/signal.h>
Claudio Imbrenda190df4a2016-08-04 17:54:42 +020033#include <linux/string.h>
Ingo Molnar174cd4b2017-02-02 19:15:33 +010034
Heiko Carstenscbb870c2010-02-26 22:37:43 +010035#include <asm/asm-offsets.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010036#include <asm/lowcore.h>
Martin Schwidefskyfd5ada02016-05-31 15:06:51 +020037#include <asm/stp.h>
Heiko Carstensb0c632d2008-03-25 18:47:20 +010038#include <asm/pgtable.h>
Martin Schwidefsky1e133ab2016-03-08 11:49:57 +010039#include <asm/gmap.h>
Heiko Carstensf5daba12009-03-26 15:24:01 +010040#include <asm/nmi.h>
David Howellsa0616cd2012-03-28 18:30:02 +010041#include <asm/switch_to.h>
Jens Freimann6d3da242013-07-03 15:18:35 +020042#include <asm/isc.h>
Christian Borntraeger1526bf92012-05-15 14:15:25 +020043#include <asm/sclp.h>
David Hildenbrand0a763c72016-05-18 16:03:47 +020044#include <asm/cpacf.h>
Linus Torvalds221bb8a2016-08-02 16:11:27 -040045#include <asm/timex.h>
Tony Krowiake585b242018-09-25 19:16:18 -040046#include <asm/ap.h>
Janosch Frank29b40f12019-09-30 04:19:18 -040047#include <asm/uv.h>
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010048#include "kvm-s390.h"
Heiko Carstensb0c632d2008-03-25 18:47:20 +010049#include "gaccess.h"
50
Cornelia Huck5786fff2012-07-23 17:20:29 +020051#define CREATE_TRACE_POINTS
52#include "trace.h"
Cornelia Huckade38c32012-07-23 17:20:30 +020053#include "trace-s390.h"
Cornelia Huck5786fff2012-07-23 17:20:29 +020054
Thomas Huth41408c282015-02-06 15:01:21 +010055#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
Jens Freimann816c7662014-11-24 17:13:46 +010056#define LOCAL_IRQS 32
57#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
58 (KVM_MAX_VCPUS + LOCAL_IRQS))
Thomas Huth41408c282015-02-06 15:01:21 +010059
Heiko Carstensb0c632d2008-03-25 18:47:20 +010060#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
Christian Borntraegerccc40c52018-03-08 12:48:05 +000061#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
Heiko Carstensb0c632d2008-03-25 18:47:20 +010062
63struct kvm_stats_debugfs_item debugfs_entries[] = {
64 { "userspace_handled", VCPU_STAT(exit_userspace) },
Christian Borntraeger0eaeafa2008-05-07 09:22:53 +020065 { "exit_null", VCPU_STAT(exit_null) },
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010066 { "exit_validity", VCPU_STAT(exit_validity) },
67 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
68 { "exit_external_request", VCPU_STAT(exit_external_request) },
Christian Borntraegera5e0acea92018-02-23 07:57:33 +000069 { "exit_io_request", VCPU_STAT(exit_io_request) },
Christian Borntraeger8f2abe62008-03-25 18:47:23 +010070 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010071 { "exit_instruction", VCPU_STAT(exit_instruction) },
Alexander Yarygin9ec6de12016-05-06 16:33:06 +030072 { "exit_pei", VCPU_STAT(exit_pei) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010073 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
74 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
Janosch Franka011eeb2016-05-09 14:14:01 +020075 { "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
Paolo Bonzinif7819512015-02-04 18:20:58 +010076 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
Paolo Bonzini62bea5b2015-09-15 18:27:57 +020077 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
Christian Borntraeger3491caf2016-05-13 12:16:35 +020078 { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
Christian Borntraeger8b905d22019-03-05 05:30:02 -050079 { "halt_no_poll_steal", VCPU_STAT(halt_no_poll_steal) },
David Hildenbrandce2e4f02014-07-11 10:00:43 +020080 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
Christian Borntraegerf5e10b02008-07-25 15:52:44 +020081 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010082 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
David Hildenbrandaba07502014-01-23 10:47:13 +010083 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
84 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
Christian Borntraegerccc40c52018-03-08 12:48:05 +000085 { "deliver_ckc", VCPU_STAT(deliver_ckc) },
86 { "deliver_cputm", VCPU_STAT(deliver_cputm) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010087 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +020088 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010089 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
Christian Borntraegerccc40c52018-03-08 12:48:05 +000090 { "deliver_virtio", VCPU_STAT(deliver_virtio) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010091 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
92 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
93 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
Christian Borntraegerccc40c52018-03-08 12:48:05 +000094 { "deliver_program", VCPU_STAT(deliver_program) },
95 { "deliver_io", VCPU_STAT(deliver_io) },
QingFeng Hao32de0742018-03-02 11:56:47 +010096 { "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
Carsten Otteba5c1e92008-03-25 18:47:26 +010097 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
Christian Borntraegerccc40c52018-03-08 12:48:05 +000098 { "inject_ckc", VCPU_STAT(inject_ckc) },
99 { "inject_cputm", VCPU_STAT(inject_cputm) },
100 { "inject_external_call", VCPU_STAT(inject_external_call) },
101 { "inject_float_mchk", VM_STAT(inject_float_mchk) },
102 { "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
103 { "inject_io", VM_STAT(inject_io) },
104 { "inject_mchk", VCPU_STAT(inject_mchk) },
105 { "inject_pfault_done", VM_STAT(inject_pfault_done) },
106 { "inject_program", VCPU_STAT(inject_program) },
107 { "inject_restart", VCPU_STAT(inject_restart) },
108 { "inject_service_signal", VM_STAT(inject_service_signal) },
109 { "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
110 { "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
111 { "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
112 { "inject_virtio", VM_STAT(inject_virtio) },
Christian Borntraegera37cb072018-01-23 13:28:40 +0100113 { "instruction_epsw", VCPU_STAT(instruction_epsw) },
114 { "instruction_gs", VCPU_STAT(instruction_gs) },
115 { "instruction_io_other", VCPU_STAT(instruction_io_other) },
116 { "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
117 { "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
Christian Borntraeger69d0d3a2013-06-12 13:54:53 +0200118 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
Christian Borntraegera37cb072018-01-23 13:28:40 +0100119 { "instruction_ptff", VCPU_STAT(instruction_ptff) },
Christian Borntraeger453423d2008-03-25 18:47:29 +0100120 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
Christian Borntraegera37cb072018-01-23 13:28:40 +0100121 { "instruction_sck", VCPU_STAT(instruction_sck) },
122 { "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
Christian Borntraeger453423d2008-03-25 18:47:29 +0100123 { "instruction_spx", VCPU_STAT(instruction_spx) },
124 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
125 { "instruction_stap", VCPU_STAT(instruction_stap) },
Christian Borntraegera37cb072018-01-23 13:28:40 +0100126 { "instruction_iske", VCPU_STAT(instruction_iske) },
127 { "instruction_ri", VCPU_STAT(instruction_ri) },
128 { "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
129 { "instruction_sske", VCPU_STAT(instruction_sske) },
Heiko Carstens8a2422342014-01-10 14:33:28 +0100130 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
Konstantin Weitzb31288f2013-04-17 17:36:29 +0200131 { "instruction_essa", VCPU_STAT(instruction_essa) },
Christian Borntraeger453423d2008-03-25 18:47:29 +0100132 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
133 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
Christian Borntraegera37cb072018-01-23 13:28:40 +0100134 { "instruction_tb", VCPU_STAT(instruction_tb) },
135 { "instruction_tpi", VCPU_STAT(instruction_tpi) },
Christian Borntraegerbb25b9b2011-07-24 10:48:17 +0200136 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
Christian Borntraegera37cb072018-01-23 13:28:40 +0100137 { "instruction_tsch", VCPU_STAT(instruction_tsch) },
Janosch Frank95ca2cb2016-05-23 15:11:58 +0200138 { "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
David Hildenbranda3508fb2015-07-08 13:19:48 +0200139 { "instruction_sie", VCPU_STAT(instruction_sie) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100140 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
Cornelia Huckbd59d3a2011-11-17 11:00:42 +0100141 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
Christian Ehrhardt7697e71f2011-10-18 12:27:15 +0200142 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100143 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +0200144 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
145 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100146 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +0200147 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
148 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
Eric Farmancd7b4b62015-02-12 09:06:34 -0500149 { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
Christian Borntraeger5288fbf2008-03-25 18:47:31 +0100150 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
151 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
152 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
David Hildenbrand42cb0c92014-05-23 12:25:11 +0200153 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
154 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
155 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
Christian Borntraeger866c1382018-01-24 12:27:01 +0100156 { "instruction_diag_10", VCPU_STAT(diagnose_10) },
157 { "instruction_diag_44", VCPU_STAT(diagnose_44) },
158 { "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
Christian Borntraeger8474e5c2019-02-15 13:47:20 +0100159 { "diag_9c_ignored", VCPU_STAT(diagnose_9c_ignored) },
Christian Borntraeger866c1382018-01-24 12:27:01 +0100160 { "instruction_diag_258", VCPU_STAT(diagnose_258) },
161 { "instruction_diag_308", VCPU_STAT(diagnose_308) },
162 { "instruction_diag_500", VCPU_STAT(diagnose_500) },
Christian Borntraegera37cb072018-01-23 13:28:40 +0100163 { "instruction_diag_other", VCPU_STAT(diagnose_other) },
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100164 { NULL }
165};
166
Collin L. Walling8fa16962016-07-26 15:29:44 -0400167struct kvm_s390_tod_clock_ext {
168 __u8 epoch_idx;
169 __u64 tod;
170 __u8 reserved[7];
171} __packed;
172
David Hildenbranda411edf2016-02-02 15:41:22 +0100173/* allow nested virtualization in KVM (if enabled by user space) */
174static int nested;
175module_param(nested, int, S_IRUGO);
176MODULE_PARM_DESC(nested, "Nested virtualization support");
177
Janosch Franka4499382018-07-13 11:28:31 +0100178/* allow 1m huge page guest backing, if !nested */
179static int hpage;
180module_param(hpage, int, 0444);
181MODULE_PARM_DESC(hpage, "1m huge page backing support");
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100182
Christian Borntraeger8b905d22019-03-05 05:30:02 -0500183/* maximum percentage of steal time for polling. >100 is treated like 100 */
184static u8 halt_poll_max_steal = 10;
185module_param(halt_poll_max_steal, byte, 0644);
Wei Yongjunb41fb522019-05-04 06:51:45 +0000186MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
Christian Borntraeger8b905d22019-03-05 05:30:02 -0500187
Michael Muellercc674ef2020-02-27 10:10:31 +0100188/* if set to true, the GISA will be initialized and used if available */
189static bool use_gisa = true;
190module_param(use_gisa, bool, 0644);
191MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");
192
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +0000193/*
194 * For now we handle at most 16 double words as this is what the s390 base
195 * kernel handles and stores in the prefix page. If we ever need to go beyond
196 * this, this requires changes to code, but the external uapi can stay.
197 */
198#define SIZE_INTERNAL 16
199
200/*
201 * Base feature mask that defines default mask for facilities. Consists of the
202 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
203 */
204static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
205/*
206 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
207 * and defines the facilities that can be enabled via a cpu model.
208 */
209static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };
210
211static unsigned long kvm_s390_fac_size(void)
Michael Mueller78c4b592013-07-26 15:04:04 +0200212{
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +0000213 BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
214 BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
215 BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
216 sizeof(S390_lowcore.stfle_fac_list));
217
218 return SIZE_INTERNAL;
Michael Mueller78c4b592013-07-26 15:04:04 +0200219}
220
David Hildenbrand15c97052015-03-19 17:36:43 +0100221/* available cpu features supported by kvm */
222static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
David Hildenbrand0a763c72016-05-18 16:03:47 +0200223/* available subfunctions indicated via query / "test bit" */
224static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;
David Hildenbrand15c97052015-03-19 17:36:43 +0100225
Michael Mueller9d8d5782015-02-02 15:42:51 +0100226static struct gmap_notifier gmap_notifier;
David Hildenbranda3508fb2015-07-08 13:19:48 +0200227static struct gmap_notifier vsie_gmap_notifier;
Christian Borntraeger78f26132015-07-22 15:50:58 +0200228debug_info_t *kvm_s390_dbf;
Janosch Frank3e6c5562019-10-02 04:46:58 -0400229debug_info_t *kvm_s390_dbf_uv;
Michael Mueller9d8d5782015-02-02 15:42:51 +0100230
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100231/* Section: not file related */
Radim Krčmář13a34e02014-08-28 15:13:03 +0200232int kvm_arch_hardware_enable(void)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100233{
234 /* every s390 is virtualization enabled ;-) */
Alexander Graf10474ae2009-09-15 11:37:46 +0200235 return 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100236}
237
Sean Christophersonb9904082020-03-21 13:25:55 -0700238int kvm_arch_check_processor_compat(void *opaque)
Sean Christophersonf257d6d2019-04-19 22:18:17 -0700239{
240 return 0;
241}
242
Janosch Frank29b40f12019-09-30 04:19:18 -0400243/* forward declarations */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +0100244static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
245 unsigned long end);
Janosch Frank29b40f12019-09-30 04:19:18 -0400246static int sca_switch_to_extended(struct kvm *kvm);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200247
David Hildenbrand15757672018-02-07 12:46:45 +0100248static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
249{
250 u8 delta_idx = 0;
251
252 /*
253 * The TOD jumps by delta, we have to compensate this by adding
254 * -delta to the epoch.
255 */
256 delta = -delta;
257
258 /* sign-extension - we're adding to signed values below */
259 if ((s64)delta < 0)
260 delta_idx = -1;
261
262 scb->epoch += delta;
263 if (scb->ecd & ECD_MEF) {
264 scb->epdx += delta_idx;
265 if (scb->epoch < delta)
266 scb->epdx += 1;
267 }
268}
269
Fan Zhangfdf03652015-05-13 10:58:41 +0200270/*
271 * This callback is executed during stop_machine(). All CPUs are therefore
272 * temporarily stopped. In order not to change guest behavior, we have to
273 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
274 * so a CPU won't be stopped while calculating with the epoch.
275 */
276static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
277 void *v)
278{
279 struct kvm *kvm;
280 struct kvm_vcpu *vcpu;
281 int i;
282 unsigned long long *delta = v;
283
284 list_for_each_entry(kvm, &vm_list, vm_list) {
Fan Zhangfdf03652015-05-13 10:58:41 +0200285 kvm_for_each_vcpu(i, vcpu, kvm) {
David Hildenbrand15757672018-02-07 12:46:45 +0100286 kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
287 if (i == 0) {
288 kvm->arch.epoch = vcpu->arch.sie_block->epoch;
289 kvm->arch.epdx = vcpu->arch.sie_block->epdx;
290 }
David Hildenbranddb0758b2016-02-15 09:42:25 +0100291 if (vcpu->arch.cputm_enabled)
292 vcpu->arch.cputm_start += *delta;
David Hildenbrand91473b42015-10-29 10:30:36 +0100293 if (vcpu->arch.vsie_block)
David Hildenbrand15757672018-02-07 12:46:45 +0100294 kvm_clock_sync_scb(vcpu->arch.vsie_block,
295 *delta);
Fan Zhangfdf03652015-05-13 10:58:41 +0200296 }
297 }
298 return NOTIFY_OK;
299}
300
301static struct notifier_block kvm_clock_notifier = {
302 .notifier_call = kvm_clock_sync,
303};
304
Sean Christophersonb9904082020-03-21 13:25:55 -0700305int kvm_arch_hardware_setup(void *opaque)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100306{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +0200307 gmap_notifier.notifier_call = kvm_gmap_notifier;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +0100308 gmap_register_pte_notifier(&gmap_notifier);
David Hildenbranda3508fb2015-07-08 13:19:48 +0200309 vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
310 gmap_register_pte_notifier(&vsie_gmap_notifier);
Fan Zhangfdf03652015-05-13 10:58:41 +0200311 atomic_notifier_chain_register(&s390_epoch_delta_notifier,
312 &kvm_clock_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100313 return 0;
314}
315
316void kvm_arch_hardware_unsetup(void)
317{
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +0100318 gmap_unregister_pte_notifier(&gmap_notifier);
David Hildenbranda3508fb2015-07-08 13:19:48 +0200319 gmap_unregister_pte_notifier(&vsie_gmap_notifier);
Fan Zhangfdf03652015-05-13 10:58:41 +0200320 atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
321 &kvm_clock_notifier);
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100322}
323
David Hildenbrand22be5a132016-01-21 13:22:54 +0100324static void allow_cpu_feat(unsigned long nr)
325{
326 set_bit_inv(nr, kvm_s390_available_cpu_feat);
327}
328
David Hildenbrand0a763c72016-05-18 16:03:47 +0200329static inline int plo_test_bit(unsigned char nr)
330{
331 register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
Heiko Carstensd051ae52016-12-13 14:25:32 +0100332 int cc;
David Hildenbrand0a763c72016-05-18 16:03:47 +0200333
334 asm volatile(
335 /* Parameter registers are ignored for "test bit" */
336 " plo 0,0,0,0(0)\n"
337 " ipm %0\n"
338 " srl %0,28\n"
339 : "=d" (cc)
340 : "d" (r0)
341 : "cc");
342 return cc == 0;
343}
344
Heiko Carstensd0dea732019-10-02 14:34:37 +0200345static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
Christian Borntraegerd6681392019-02-20 03:04:07 -0500346{
347 register unsigned long r0 asm("0") = 0; /* query function */
348 register unsigned long r1 asm("1") = (unsigned long) query;
349
350 asm volatile(
351 /* Parameter regs are ignored */
352 " .insn rrf,%[opc] << 16,2,4,6,0\n"
Heiko Carstensb1c41ac2019-10-02 14:24:47 +0200353 :
Christian Borntraegerd6681392019-02-20 03:04:07 -0500354 : "d" (r0), "a" (r1), [opc] "i" (opcode)
Heiko Carstensb1c41ac2019-10-02 14:24:47 +0200355 : "cc", "memory");
Christian Borntraegerd6681392019-02-20 03:04:07 -0500356}
357
Christian Borntraeger173aec22018-12-28 10:59:06 +0100358#define INSN_SORTL 0xb938
Christian Borntraeger4f45b902018-12-28 10:46:04 +0100359#define INSN_DFLTCC 0xb939
Christian Borntraeger173aec22018-12-28 10:59:06 +0100360
David Hildenbrand22be5a132016-01-21 13:22:54 +0100361static void kvm_s390_cpu_feat_init(void)
362{
David Hildenbrand0a763c72016-05-18 16:03:47 +0200363 int i;
364
365 for (i = 0; i < 256; ++i) {
366 if (plo_test_bit(i))
367 kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
368 }
369
370 if (test_facility(28)) /* TOD-clock steering */
Linus Torvalds221bb8a2016-08-02 16:11:27 -0400371 ptff(kvm_s390_available_subfunc.ptff,
372 sizeof(kvm_s390_available_subfunc.ptff),
373 PTFF_QAF);
David Hildenbrand0a763c72016-05-18 16:03:47 +0200374
375 if (test_facility(17)) { /* MSA */
Martin Schwidefsky69c0e362016-08-18 12:59:46 +0200376 __cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
377 kvm_s390_available_subfunc.kmac);
378 __cpacf_query(CPACF_KMC, (cpacf_mask_t *)
379 kvm_s390_available_subfunc.kmc);
380 __cpacf_query(CPACF_KM, (cpacf_mask_t *)
381 kvm_s390_available_subfunc.km);
382 __cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
383 kvm_s390_available_subfunc.kimd);
384 __cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
385 kvm_s390_available_subfunc.klmd);
David Hildenbrand0a763c72016-05-18 16:03:47 +0200386 }
387 if (test_facility(76)) /* MSA3 */
Martin Schwidefsky69c0e362016-08-18 12:59:46 +0200388 __cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
389 kvm_s390_available_subfunc.pckmo);
David Hildenbrand0a763c72016-05-18 16:03:47 +0200390 if (test_facility(77)) { /* MSA4 */
Martin Schwidefsky69c0e362016-08-18 12:59:46 +0200391 __cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
392 kvm_s390_available_subfunc.kmctr);
393 __cpacf_query(CPACF_KMF, (cpacf_mask_t *)
394 kvm_s390_available_subfunc.kmf);
395 __cpacf_query(CPACF_KMO, (cpacf_mask_t *)
396 kvm_s390_available_subfunc.kmo);
397 __cpacf_query(CPACF_PCC, (cpacf_mask_t *)
398 kvm_s390_available_subfunc.pcc);
David Hildenbrand0a763c72016-05-18 16:03:47 +0200399 }
400 if (test_facility(57)) /* MSA5 */
Harald Freudenberger985a9d22017-02-24 10:11:54 +0100401 __cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
Martin Schwidefsky69c0e362016-08-18 12:59:46 +0200402 kvm_s390_available_subfunc.ppno);
David Hildenbrand0a763c72016-05-18 16:03:47 +0200403
Jason J. Hernee000b8e2017-03-20 09:57:42 -0400404 if (test_facility(146)) /* MSA8 */
405 __cpacf_query(CPACF_KMA, (cpacf_mask_t *)
406 kvm_s390_available_subfunc.kma);
407
Christian Borntraeger13209ad2018-12-28 09:33:35 +0100408 if (test_facility(155)) /* MSA9 */
409 __cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
410 kvm_s390_available_subfunc.kdsa);
411
Christian Borntraeger173aec22018-12-28 10:59:06 +0100412 if (test_facility(150)) /* SORTL */
413 __insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
414
Christian Borntraeger4f45b902018-12-28 10:46:04 +0100415 if (test_facility(151)) /* DFLTCC */
416 __insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
417
David Hildenbrand22be5a132016-01-21 13:22:54 +0100418 if (MACHINE_HAS_ESOP)
419 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
David Hildenbranda3508fb2015-07-08 13:19:48 +0200420 /*
421 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
422 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
423 */
424 if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
David Hildenbranda411edf2016-02-02 15:41:22 +0100425 !test_facility(3) || !nested)
David Hildenbranda3508fb2015-07-08 13:19:48 +0200426 return;
427 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
David Hildenbrand19c439b2015-11-25 11:02:26 +0100428 if (sclp.has_64bscao)
429 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
David Hildenbrand0615a322015-11-25 09:59:49 +0100430 if (sclp.has_siif)
431 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
David Hildenbrand77d18f62015-11-24 16:32:35 +0100432 if (sclp.has_gpere)
433 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
David Hildenbranda1b7b9b2015-11-24 16:41:33 +0100434 if (sclp.has_gsls)
435 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
David Hildenbrand5630a8e2015-11-24 16:53:51 +0100436 if (sclp.has_ib)
437 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
David Hildenbrand13ee3f62015-11-24 16:54:37 +0100438 if (sclp.has_cei)
439 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
David Hildenbrand7fd7f392015-11-24 16:56:23 +0100440 if (sclp.has_ibs)
441 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
Farhan Ali730cd632017-02-24 16:12:56 -0500442 if (sclp.has_kss)
443 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
David Hildenbrand5d3876a2016-04-13 17:06:50 +0200444 /*
445 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
446 * all skey handling functions read/set the skey from the PGSTE
447 * instead of the real storage key.
448 *
449 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
450 * pages being detected as preserved although they are resident.
451 *
452 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
453 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
454 *
455 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
456 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
457 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
458 *
459 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
460 * cannot easily shadow the SCA because of the ipte lock.
461 */
David Hildenbrand22be5a132016-01-21 13:22:54 +0100462}
463
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100464int kvm_arch_init(void *opaque)
465{
Janosch Frankf76f6372019-10-02 03:56:27 -0400466 int rc = -ENOMEM;
Michael Mueller308c3e62018-11-30 15:32:06 +0100467
Christian Borntraeger78f26132015-07-22 15:50:58 +0200468 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
469 if (!kvm_s390_dbf)
470 return -ENOMEM;
471
Janosch Frank3e6c5562019-10-02 04:46:58 -0400472 kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
473 if (!kvm_s390_dbf_uv)
474 goto out;
475
476 if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
477 debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
Janosch Frankf76f6372019-10-02 03:56:27 -0400478 goto out;
Christian Borntraeger78f26132015-07-22 15:50:58 +0200479
David Hildenbrand22be5a132016-01-21 13:22:54 +0100480 kvm_s390_cpu_feat_init();
481
Cornelia Huck84877d92014-09-02 10:27:35 +0100482 /* Register floating interrupt controller interface. */
Michael Mueller308c3e62018-11-30 15:32:06 +0100483 rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
484 if (rc) {
Michael Mueller8d43d572018-12-10 11:15:16 +0100485 pr_err("A FLIC registration call failed with rc=%d\n", rc);
Janosch Frankf76f6372019-10-02 03:56:27 -0400486 goto out;
Michael Mueller308c3e62018-11-30 15:32:06 +0100487 }
Michael Muellerb1d1e762019-01-31 09:52:45 +0100488
489 rc = kvm_s390_gib_init(GAL_ISC);
490 if (rc)
Janosch Frankf76f6372019-10-02 03:56:27 -0400491 goto out;
Michael Muellerb1d1e762019-01-31 09:52:45 +0100492
Michael Mueller308c3e62018-11-30 15:32:06 +0100493 return 0;
494
Janosch Frankf76f6372019-10-02 03:56:27 -0400495out:
496 kvm_arch_exit();
Michael Mueller308c3e62018-11-30 15:32:06 +0100497 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100498}
499
Christian Borntraeger78f26132015-07-22 15:50:58 +0200500void kvm_arch_exit(void)
501{
Michael Mueller1282c212019-01-31 09:52:40 +0100502 kvm_s390_gib_destroy();
Christian Borntraeger78f26132015-07-22 15:50:58 +0200503 debug_unregister(kvm_s390_dbf);
Janosch Frank3e6c5562019-10-02 04:46:58 -0400504 debug_unregister(kvm_s390_dbf_uv);
Christian Borntraeger78f26132015-07-22 15:50:58 +0200505}
506
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100507/* Section: device related */
508long kvm_arch_dev_ioctl(struct file *filp,
509 unsigned int ioctl, unsigned long arg)
510{
511 if (ioctl == KVM_S390_ENABLE_SIE)
512 return s390_enable_sie();
513 return -EINVAL;
514}
515
Alexander Graf784aa3d2014-07-14 18:27:35 +0200516int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100517{
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100518 int r;
519
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200520 switch (ext) {
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100521 case KVM_CAP_S390_PSW:
Christian Borntraegerb6cf8782011-09-20 17:07:29 +0200522 case KVM_CAP_S390_GMAP:
Christian Borntraeger52e16b12011-11-17 11:00:44 +0100523 case KVM_CAP_SYNC_MMU:
Carsten Otte1efd0f52012-01-04 10:25:29 +0100524#ifdef CONFIG_KVM_S390_UCONTROL
525 case KVM_CAP_S390_UCONTROL:
526#endif
Dominik Dingel3c038e62013-10-07 17:11:48 +0200527 case KVM_CAP_ASYNC_PF:
Christian Borntraeger60b413c2012-01-11 11:20:31 +0100528 case KVM_CAP_SYNC_REGS:
Carsten Otte14eebd92012-05-15 14:15:26 +0200529 case KVM_CAP_ONE_REG:
Cornelia Huckd6712df2012-12-20 15:32:11 +0100530 case KVM_CAP_ENABLE_CAP:
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +0100531 case KVM_CAP_S390_CSS_SUPPORT:
Cornelia Huck10ccaa12013-02-28 12:33:21 +0100532 case KVM_CAP_IOEVENTFD:
Jens Freimannc05c4182013-10-07 16:13:45 +0200533 case KVM_CAP_DEVICE_CTRL:
Cornelia Huck78599d92014-07-15 09:54:39 +0200534 case KVM_CAP_S390_IRQCHIP:
Dominik Dingelf2061652014-04-09 13:13:00 +0200535 case KVM_CAP_VM_ATTRIBUTES:
David Hildenbrand6352e4d2014-04-10 17:35:00 +0200536 case KVM_CAP_MP_STATE:
Paolo Bonzini460df4c2017-02-08 11:50:15 +0100537 case KVM_CAP_IMMEDIATE_EXIT:
Jens Freimann47b43c52014-11-11 20:57:06 +0100538 case KVM_CAP_S390_INJECT_IRQ:
David Hildenbrand2444b352014-10-09 14:10:13 +0200539 case KVM_CAP_S390_USER_SIGP:
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100540 case KVM_CAP_S390_USER_STSI:
Jason J. Herne30ee2a92014-09-23 09:23:01 -0400541 case KVM_CAP_S390_SKEYS:
Jens Freimann816c7662014-11-24 17:13:46 +0100542 case KVM_CAP_S390_IRQ_STATE:
David Hildenbrand6502a342016-06-21 14:19:51 +0200543 case KVM_CAP_S390_USER_INSTR0:
Claudio Imbrenda4036e382016-08-04 17:58:47 +0200544 case KVM_CAP_S390_CMMA_MIGRATION:
Yi Min Zhao47a46932017-03-10 09:29:38 +0100545 case KVM_CAP_S390_AIS:
Christian Borntraegerda9a1442017-11-09 10:00:45 +0100546 case KVM_CAP_S390_AIS_MIGRATION:
Janosch Frank7de3f142020-01-31 05:02:02 -0500547 case KVM_CAP_S390_VCPU_RESETS:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100548 r = 1;
549 break;
Janosch Franka4499382018-07-13 11:28:31 +0100550 case KVM_CAP_S390_HPAGE_1M:
551 r = 0;
Janosch Frank40ebdb82018-08-01 11:48:28 +0100552 if (hpage && !kvm_is_ucontrol(kvm))
Janosch Franka4499382018-07-13 11:28:31 +0100553 r = 1;
554 break;
Thomas Huth41408c282015-02-06 15:01:21 +0100555 case KVM_CAP_S390_MEM_OP:
556 r = MEM_OP_MAX_SIZE;
557 break;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200558 case KVM_CAP_NR_VCPUS:
559 case KVM_CAP_MAX_VCPUS:
Thomas Hutha86cb412019-05-23 18:43:08 +0200560 case KVM_CAP_MAX_VCPU_ID:
David Hildenbrand76a6dd72015-11-24 13:33:49 +0100561 r = KVM_S390_BSCA_CPU_SLOTS;
David Hildenbranda6940672016-08-08 22:39:32 +0200562 if (!kvm_s390_use_sca_entries())
563 r = KVM_MAX_VCPUS;
564 else if (sclp.has_esca && sclp.has_64bscao)
David Hildenbrand76a6dd72015-11-24 13:33:49 +0100565 r = KVM_S390_ESCA_CPU_SLOTS;
Christian Borntraegere726b1b2012-05-02 10:50:38 +0200566 break;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200567 case KVM_CAP_S390_COW:
Martin Schwidefskyabf09be2012-11-07 13:17:37 +0100568 r = MACHINE_HAS_ESOP;
Christian Borntraeger1526bf92012-05-15 14:15:25 +0200569 break;
Eric Farman68c55752014-06-09 10:57:26 -0400570 case KVM_CAP_S390_VECTOR_REGISTERS:
571 r = MACHINE_HAS_VX;
572 break;
Fan Zhangc6e5f162016-01-07 18:24:29 +0800573 case KVM_CAP_S390_RI:
574 r = test_facility(64);
575 break;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +0100576 case KVM_CAP_S390_GS:
577 r = test_facility(133);
578 break;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +0100579 case KVM_CAP_S390_BPB:
580 r = test_facility(82);
581 break;
Christian Borntraeger13da9ae2020-02-18 15:08:07 -0500582 case KVM_CAP_S390_PROTECTED:
583 r = is_prot_virt_host();
584 break;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200585 default:
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100586 r = 0;
Carsten Otte2bd0ac42008-07-25 15:49:13 +0200587 }
Carsten Otted7b0b5e2009-11-19 14:21:16 +0100588 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100589}
590
Sean Christopherson0dff0842020-02-18 13:07:29 -0800591void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400592{
Janosch Frank0959e162018-07-17 13:21:22 +0100593 int i;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400594 gfn_t cur_gfn, last_gfn;
Janosch Frank0959e162018-07-17 13:21:22 +0100595 unsigned long gaddr, vmaddr;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400596 struct gmap *gmap = kvm->arch.gmap;
Janosch Frank0959e162018-07-17 13:21:22 +0100597 DECLARE_BITMAP(bitmap, _PAGE_ENTRIES);
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400598
Janosch Frank0959e162018-07-17 13:21:22 +0100599 /* Loop over all guest segments */
600 cur_gfn = memslot->base_gfn;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400601 last_gfn = memslot->base_gfn + memslot->npages;
Janosch Frank0959e162018-07-17 13:21:22 +0100602 for (; cur_gfn <= last_gfn; cur_gfn += _PAGE_ENTRIES) {
603 gaddr = gfn_to_gpa(cur_gfn);
604 vmaddr = gfn_to_hva_memslot(memslot, cur_gfn);
605 if (kvm_is_error_hva(vmaddr))
606 continue;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400607
Janosch Frank0959e162018-07-17 13:21:22 +0100608 bitmap_zero(bitmap, _PAGE_ENTRIES);
609 gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
610 for (i = 0; i < _PAGE_ENTRIES; i++) {
611 if (test_bit(i, bitmap))
612 mark_page_dirty(kvm, cur_gfn + i);
613 }
614
Christian Borntraeger1763f8d2016-02-03 11:12:34 +0100615 if (fatal_signal_pending(current))
616 return;
Christian Borntraeger70c88a02016-02-02 15:15:56 +0100617 cond_resched();
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400618 }
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400619}
620
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100621/* Section: vm related */
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +0200622static void sca_del_vcpu(struct kvm_vcpu *vcpu);
623
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100624/*
625 * Get (and clear) the dirty memory log for a memory slot.
626 */
627int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
628 struct kvm_dirty_log *log)
629{
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400630 int r;
631 unsigned long n;
632 struct kvm_memory_slot *memslot;
Sean Christopherson2a49f612020-02-18 13:07:30 -0800633 int is_dirty;
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400634
Janosch Franke1e8a962017-02-02 16:39:31 +0100635 if (kvm_is_ucontrol(kvm))
636 return -EINVAL;
637
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400638 mutex_lock(&kvm->slots_lock);
639
640 r = -EINVAL;
641 if (log->slot >= KVM_USER_MEM_SLOTS)
642 goto out;
643
Sean Christopherson2a49f612020-02-18 13:07:30 -0800644 r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
Jason J. Herne15f36eb2012-08-02 10:10:17 -0400645 if (r)
646 goto out;
647
648 /* Clear the dirty log */
649 if (is_dirty) {
650 n = kvm_dirty_bitmap_bytes(memslot);
651 memset(memslot->dirty_bitmap, 0, n);
652 }
653 r = 0;
654out:
655 mutex_unlock(&kvm->slots_lock);
656 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +0100657}
658
David Hildenbrand6502a342016-06-21 14:19:51 +0200659static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
660{
661 unsigned int i;
662 struct kvm_vcpu *vcpu;
663
664 kvm_for_each_vcpu(i, vcpu, kvm) {
665 kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
666 }
667}
668
Paolo Bonzinie5d83c72017-02-16 10:40:56 +0100669int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
Cornelia Huckd938dc52013-10-23 18:26:34 +0200670{
671 int r;
672
673 if (cap->flags)
674 return -EINVAL;
675
676 switch (cap->cap) {
Cornelia Huck84223592013-07-15 13:36:01 +0200677 case KVM_CAP_S390_IRQCHIP:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200678 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
Cornelia Huck84223592013-07-15 13:36:01 +0200679 kvm->arch.use_irqchip = 1;
680 r = 0;
681 break;
David Hildenbrand2444b352014-10-09 14:10:13 +0200682 case KVM_CAP_S390_USER_SIGP:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200683 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
David Hildenbrand2444b352014-10-09 14:10:13 +0200684 kvm->arch.user_sigp = 1;
685 r = 0;
686 break;
Eric Farman68c55752014-06-09 10:57:26 -0400687 case KVM_CAP_S390_VECTOR_REGISTERS:
David Hildenbrand5967c172015-11-06 12:08:48 +0100688 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +0200689 if (kvm->created_vcpus) {
David Hildenbrand5967c172015-11-06 12:08:48 +0100690 r = -EBUSY;
691 } else if (MACHINE_HAS_VX) {
David Hildenbrandc54f0d62015-12-02 08:53:52 +0100692 set_kvm_facility(kvm->arch.model.fac_mask, 129);
693 set_kvm_facility(kvm->arch.model.fac_list, 129);
Guenther Hutzl2f87d942016-06-03 14:37:17 +0200694 if (test_facility(134)) {
695 set_kvm_facility(kvm->arch.model.fac_mask, 134);
696 set_kvm_facility(kvm->arch.model.fac_list, 134);
697 }
Maxim Samoylov53743aa2016-02-10 10:31:23 +0100698 if (test_facility(135)) {
699 set_kvm_facility(kvm->arch.model.fac_mask, 135);
700 set_kvm_facility(kvm->arch.model.fac_list, 135);
701 }
Christian Borntraeger7832e912018-12-28 09:43:37 +0100702 if (test_facility(148)) {
703 set_kvm_facility(kvm->arch.model.fac_mask, 148);
704 set_kvm_facility(kvm->arch.model.fac_list, 148);
705 }
Christian Borntraegerd5cb6ab2018-12-28 09:45:58 +0100706 if (test_facility(152)) {
707 set_kvm_facility(kvm->arch.model.fac_mask, 152);
708 set_kvm_facility(kvm->arch.model.fac_list, 152);
709 }
Michael Mueller18280d82015-03-16 16:05:41 +0100710 r = 0;
711 } else
712 r = -EINVAL;
David Hildenbrand5967c172015-11-06 12:08:48 +0100713 mutex_unlock(&kvm->lock);
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200714 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
715 r ? "(not available)" : "(success)");
Eric Farman68c55752014-06-09 10:57:26 -0400716 break;
Fan Zhangc6e5f162016-01-07 18:24:29 +0800717 case KVM_CAP_S390_RI:
718 r = -EINVAL;
719 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +0200720 if (kvm->created_vcpus) {
Fan Zhangc6e5f162016-01-07 18:24:29 +0800721 r = -EBUSY;
722 } else if (test_facility(64)) {
David Hildenbrandc54f0d62015-12-02 08:53:52 +0100723 set_kvm_facility(kvm->arch.model.fac_mask, 64);
724 set_kvm_facility(kvm->arch.model.fac_list, 64);
Fan Zhangc6e5f162016-01-07 18:24:29 +0800725 r = 0;
726 }
727 mutex_unlock(&kvm->lock);
728 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
729 r ? "(not available)" : "(success)");
730 break;
Yi Min Zhao47a46932017-03-10 09:29:38 +0100731 case KVM_CAP_S390_AIS:
732 mutex_lock(&kvm->lock);
733 if (kvm->created_vcpus) {
734 r = -EBUSY;
735 } else {
736 set_kvm_facility(kvm->arch.model.fac_mask, 72);
737 set_kvm_facility(kvm->arch.model.fac_list, 72);
Yi Min Zhao47a46932017-03-10 09:29:38 +0100738 r = 0;
739 }
740 mutex_unlock(&kvm->lock);
741 VM_EVENT(kvm, 3, "ENABLE: AIS %s",
742 r ? "(not available)" : "(success)");
743 break;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +0100744 case KVM_CAP_S390_GS:
745 r = -EINVAL;
746 mutex_lock(&kvm->lock);
Christian Borntraeger241e3ec02017-11-16 15:12:52 +0100747 if (kvm->created_vcpus) {
Fan Zhang4e0b1ab2016-11-29 07:17:55 +0100748 r = -EBUSY;
749 } else if (test_facility(133)) {
750 set_kvm_facility(kvm->arch.model.fac_mask, 133);
751 set_kvm_facility(kvm->arch.model.fac_list, 133);
752 r = 0;
753 }
754 mutex_unlock(&kvm->lock);
755 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
756 r ? "(not available)" : "(success)");
757 break;
Janosch Franka4499382018-07-13 11:28:31 +0100758 case KVM_CAP_S390_HPAGE_1M:
759 mutex_lock(&kvm->lock);
760 if (kvm->created_vcpus)
761 r = -EBUSY;
Janosch Frank40ebdb82018-08-01 11:48:28 +0100762 else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
Janosch Franka4499382018-07-13 11:28:31 +0100763 r = -EINVAL;
764 else {
765 r = 0;
Janosch Frankdf88f312018-08-30 16:14:18 +0200766 down_write(&kvm->mm->mmap_sem);
Janosch Franka4499382018-07-13 11:28:31 +0100767 kvm->mm->context.allow_gmap_hpage_1m = 1;
Janosch Frankdf88f312018-08-30 16:14:18 +0200768 up_write(&kvm->mm->mmap_sem);
Janosch Franka4499382018-07-13 11:28:31 +0100769 /*
770 * We might have to create fake 4k page
771 * tables. To avoid that the hardware works on
772 * stale PGSTEs, we emulate these instructions.
773 */
774 kvm->arch.use_skf = 0;
775 kvm->arch.use_pfmfi = 0;
776 }
777 mutex_unlock(&kvm->lock);
778 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_HPAGE %s",
779 r ? "(not available)" : "(success)");
780 break;
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100781 case KVM_CAP_S390_USER_STSI:
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200782 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
Ekaterina Tumanovae44fc8c2015-01-30 16:55:56 +0100783 kvm->arch.user_stsi = 1;
784 r = 0;
785 break;
David Hildenbrand6502a342016-06-21 14:19:51 +0200786 case KVM_CAP_S390_USER_INSTR0:
787 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
788 kvm->arch.user_instr0 = 1;
789 icpt_operexc_on_all_vcpus(kvm);
790 r = 0;
791 break;
Cornelia Huckd938dc52013-10-23 18:26:34 +0200792 default:
793 r = -EINVAL;
794 break;
795 }
796 return r;
797}
798
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100799static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
800{
801 int ret;
802
803 switch (attr->attr) {
804 case KVM_S390_VM_MEM_LIMIT_SIZE:
805 ret = 0;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200806 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
Dominik Dingela3a92c32014-12-01 17:24:42 +0100807 kvm->arch.mem_limit);
808 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100809 ret = -EFAULT;
810 break;
811 default:
812 ret = -ENXIO;
813 break;
814 }
815 return ret;
816}
817
818static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200819{
820 int ret;
821 unsigned int idx;
822 switch (attr->attr) {
823 case KVM_S390_VM_MEM_ENABLE_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +0100824 ret = -ENXIO;
David Hildenbrandc24cc9c2015-11-24 13:53:04 +0100825 if (!sclp.has_cmma)
Dominik Dingele6db1d62015-05-07 15:41:57 +0200826 break;
827
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200828 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200829 mutex_lock(&kvm->lock);
Janosch Franka4499382018-07-13 11:28:31 +0100830 if (kvm->created_vcpus)
831 ret = -EBUSY;
832 else if (kvm->mm->context.allow_gmap_hpage_1m)
833 ret = -EINVAL;
834 else {
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200835 kvm->arch.use_cmma = 1;
Janosch Frankc9f0a2b2018-02-16 12:16:14 +0100836 /* Not compatible with cmma. */
837 kvm->arch.use_pfmfi = 0;
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200838 ret = 0;
839 }
840 mutex_unlock(&kvm->lock);
841 break;
842 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +0100843 ret = -ENXIO;
844 if (!sclp.has_cmma)
845 break;
Dominik Dingelc3489152015-06-18 13:17:11 +0200846 ret = -EINVAL;
847 if (!kvm->arch.use_cmma)
848 break;
849
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200850 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200851 mutex_lock(&kvm->lock);
852 idx = srcu_read_lock(&kvm->srcu);
Dominik Dingela13cff32014-10-23 12:07:14 +0200853 s390_reset_cmma(kvm->arch.gmap->mm);
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200854 srcu_read_unlock(&kvm->srcu, idx);
855 mutex_unlock(&kvm->lock);
856 ret = 0;
857 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100858 case KVM_S390_VM_MEM_LIMIT_SIZE: {
859 unsigned long new_limit;
860
861 if (kvm_is_ucontrol(kvm))
862 return -EINVAL;
863
864 if (get_user(new_limit, (u64 __user *)attr->addr))
865 return -EFAULT;
866
Dominik Dingela3a92c32014-12-01 17:24:42 +0100867 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
868 new_limit > kvm->arch.mem_limit)
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100869 return -E2BIG;
870
Dominik Dingela3a92c32014-12-01 17:24:42 +0100871 if (!new_limit)
872 return -EINVAL;
873
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +0100874 /* gmap_create takes last usable address */
Dominik Dingela3a92c32014-12-01 17:24:42 +0100875 if (new_limit != KVM_S390_NO_MEM_LIMIT)
876 new_limit -= 1;
877
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100878 ret = -EBUSY;
879 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +0200880 if (!kvm->created_vcpus) {
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +0100881 /* gmap_create will round the limit up */
882 struct gmap *new = gmap_create(current->mm, new_limit);
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100883
884 if (!new) {
885 ret = -ENOMEM;
886 } else {
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +0100887 gmap_remove(kvm->arch.gmap);
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100888 new->private = kvm;
889 kvm->arch.gmap = new;
890 ret = 0;
891 }
892 }
893 mutex_unlock(&kvm->lock);
Dominik Dingela3a92c32014-12-01 17:24:42 +0100894 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
895 VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
896 (void *) kvm->arch.gmap->asce);
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +0100897 break;
898 }
Dominik Dingel4f718ea2014-04-09 13:13:00 +0200899 default:
900 ret = -ENXIO;
901 break;
902 }
903 return ret;
904}
905
Tony Krowiaka374e892014-09-03 10:13:53 +0200906static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
907
Tony Krowiak20c922f2018-04-22 11:37:03 -0400908void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
Tony Krowiaka374e892014-09-03 10:13:53 +0200909{
910 struct kvm_vcpu *vcpu;
911 int i;
912
Tony Krowiak20c922f2018-04-22 11:37:03 -0400913 kvm_s390_vcpu_block_all(kvm);
914
David Hildenbrand3194cdb2018-09-25 19:16:17 -0400915 kvm_for_each_vcpu(i, vcpu, kvm) {
Tony Krowiak20c922f2018-04-22 11:37:03 -0400916 kvm_s390_vcpu_crypto_setup(vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -0400917 /* recreate the shadow crycb by leaving the VSIE handler */
918 kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
919 }
Tony Krowiak20c922f2018-04-22 11:37:03 -0400920
921 kvm_s390_vcpu_unblock_all(kvm);
922}
923
924static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
925{
Tony Krowiaka374e892014-09-03 10:13:53 +0200926 mutex_lock(&kvm->lock);
927 switch (attr->attr) {
928 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200929 if (!test_kvm_facility(kvm, 76)) {
930 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400931 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200932 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200933 get_random_bytes(
934 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
935 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
936 kvm->arch.crypto.aes_kw = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200937 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200938 break;
939 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200940 if (!test_kvm_facility(kvm, 76)) {
941 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400942 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200943 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200944 get_random_bytes(
945 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
946 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
947 kvm->arch.crypto.dea_kw = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200948 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200949 break;
950 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200951 if (!test_kvm_facility(kvm, 76)) {
952 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400953 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200954 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200955 kvm->arch.crypto.aes_kw = 0;
956 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
957 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200958 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200959 break;
960 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200961 if (!test_kvm_facility(kvm, 76)) {
962 mutex_unlock(&kvm->lock);
Tony Krowiak37940fb2018-09-25 19:16:39 -0400963 return -EINVAL;
Christian Borntraeger8e41bd52018-10-04 14:42:43 +0200964 }
Tony Krowiaka374e892014-09-03 10:13:53 +0200965 kvm->arch.crypto.dea_kw = 0;
966 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
967 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +0200968 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
Tony Krowiaka374e892014-09-03 10:13:53 +0200969 break;
Tony Krowiak37940fb2018-09-25 19:16:39 -0400970 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
971 if (!ap_instructions_available()) {
972 mutex_unlock(&kvm->lock);
973 return -EOPNOTSUPP;
974 }
975 kvm->arch.crypto.apie = 1;
976 break;
977 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
978 if (!ap_instructions_available()) {
979 mutex_unlock(&kvm->lock);
980 return -EOPNOTSUPP;
981 }
982 kvm->arch.crypto.apie = 0;
983 break;
Tony Krowiaka374e892014-09-03 10:13:53 +0200984 default:
985 mutex_unlock(&kvm->lock);
986 return -ENXIO;
987 }
988
Tony Krowiak20c922f2018-04-22 11:37:03 -0400989 kvm_s390_vcpu_crypto_reset_all(kvm);
Tony Krowiaka374e892014-09-03 10:13:53 +0200990 mutex_unlock(&kvm->lock);
991 return 0;
992}
993
Claudio Imbrenda190df4a2016-08-04 17:54:42 +0200994static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
995{
996 int cx;
997 struct kvm_vcpu *vcpu;
998
999 kvm_for_each_vcpu(cx, vcpu, kvm)
1000 kvm_s390_sync_request(req, vcpu);
1001}
1002
1003/*
1004 * Must be called with kvm->srcu held to avoid races on memslots, and with
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001005 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001006 */
1007static int kvm_s390_vm_start_migration(struct kvm *kvm)
1008{
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001009 struct kvm_memory_slot *ms;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001010 struct kvm_memslots *slots;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001011 unsigned long ram_pages = 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001012 int slotnr;
1013
1014 /* migration mode already enabled */
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001015 if (kvm->arch.migration_mode)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001016 return 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001017 slots = kvm_memslots(kvm);
1018 if (!slots || !slots->used_slots)
1019 return -EINVAL;
1020
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001021 if (!kvm->arch.use_cmma) {
1022 kvm->arch.migration_mode = 1;
1023 return 0;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001024 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001025 /* mark all the pages in active slots as dirty */
1026 for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
1027 ms = slots->memslots + slotnr;
Igor Mammedov13a17cc2019-09-11 03:52:18 -04001028 if (!ms->dirty_bitmap)
1029 return -EINVAL;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001030 /*
1031 * The second half of the bitmap is only used on x86,
1032 * and would be wasted otherwise, so we put it to good
1033 * use here to keep track of the state of the storage
1034 * attributes.
1035 */
1036 memset(kvm_second_dirty_bitmap(ms), 0xff, kvm_dirty_bitmap_bytes(ms));
1037 ram_pages += ms->npages;
1038 }
1039 atomic64_set(&kvm->arch.cmma_dirty_pages, ram_pages);
1040 kvm->arch.migration_mode = 1;
1041 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001042 return 0;
1043}
1044
1045/*
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001046 * Must be called with kvm->slots_lock to avoid races with ourselves and
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001047 * kvm_s390_vm_start_migration.
1048 */
1049static int kvm_s390_vm_stop_migration(struct kvm *kvm)
1050{
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001051 /* migration mode already disabled */
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001052 if (!kvm->arch.migration_mode)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001053 return 0;
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001054 kvm->arch.migration_mode = 0;
1055 if (kvm->arch.use_cmma)
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001056 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001057 return 0;
1058}
1059
1060static int kvm_s390_vm_set_migration(struct kvm *kvm,
1061 struct kvm_device_attr *attr)
1062{
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001063 int res = -ENXIO;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001064
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001065 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001066 switch (attr->attr) {
1067 case KVM_S390_VM_MIGRATION_START:
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001068 res = kvm_s390_vm_start_migration(kvm);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001069 break;
1070 case KVM_S390_VM_MIGRATION_STOP:
1071 res = kvm_s390_vm_stop_migration(kvm);
1072 break;
1073 default:
1074 break;
1075 }
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01001076 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001077
1078 return res;
1079}
1080
1081static int kvm_s390_vm_get_migration(struct kvm *kvm,
1082 struct kvm_device_attr *attr)
1083{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001084 u64 mig = kvm->arch.migration_mode;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001085
1086 if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
1087 return -ENXIO;
1088
1089 if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
1090 return -EFAULT;
1091 return 0;
1092}
1093
Collin L. Walling8fa16962016-07-26 15:29:44 -04001094static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1095{
1096 struct kvm_s390_vm_tod_clock gtod;
1097
1098 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
1099 return -EFAULT;
1100
David Hildenbrand0e7def52018-02-07 12:46:43 +01001101 if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001102 return -EINVAL;
David Hildenbrand0e7def52018-02-07 12:46:43 +01001103 kvm_s390_set_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001104
1105 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
1106 gtod.epoch_idx, gtod.tod);
1107
1108 return 0;
1109}
1110
Jason J. Herne72f25022014-11-25 09:46:02 -05001111static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1112{
1113 u8 gtod_high;
1114
1115 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
1116 sizeof(gtod_high)))
1117 return -EFAULT;
1118
1119 if (gtod_high != 0)
1120 return -EINVAL;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001121 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001122
1123 return 0;
1124}
1125
1126static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1127{
David Hildenbrand0e7def52018-02-07 12:46:43 +01001128 struct kvm_s390_vm_tod_clock gtod = { 0 };
Jason J. Herne72f25022014-11-25 09:46:02 -05001129
David Hildenbrand0e7def52018-02-07 12:46:43 +01001130 if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
1131 sizeof(gtod.tod)))
Jason J. Herne72f25022014-11-25 09:46:02 -05001132 return -EFAULT;
1133
David Hildenbrand0e7def52018-02-07 12:46:43 +01001134 kvm_s390_set_tod_clock(kvm, &gtod);
1135 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001136 return 0;
1137}
1138
1139static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1140{
1141 int ret;
1142
1143 if (attr->flags)
1144 return -EINVAL;
1145
1146 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001147 case KVM_S390_VM_TOD_EXT:
1148 ret = kvm_s390_set_tod_ext(kvm, attr);
1149 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001150 case KVM_S390_VM_TOD_HIGH:
1151 ret = kvm_s390_set_tod_high(kvm, attr);
1152 break;
1153 case KVM_S390_VM_TOD_LOW:
1154 ret = kvm_s390_set_tod_low(kvm, attr);
1155 break;
1156 default:
1157 ret = -ENXIO;
1158 break;
1159 }
1160 return ret;
1161}
1162
David Hildenbrand33d1b272018-04-27 14:36:13 +02001163static void kvm_s390_get_tod_clock(struct kvm *kvm,
1164 struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04001165{
1166 struct kvm_s390_tod_clock_ext htod;
1167
1168 preempt_disable();
1169
1170 get_tod_clock_ext((char *)&htod);
1171
1172 gtod->tod = htod.tod + kvm->arch.epoch;
David Hildenbrand33d1b272018-04-27 14:36:13 +02001173 gtod->epoch_idx = 0;
1174 if (test_kvm_facility(kvm, 139)) {
1175 gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;
1176 if (gtod->tod < htod.tod)
1177 gtod->epoch_idx += 1;
1178 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04001179
1180 preempt_enable();
1181}
1182
1183static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1184{
1185 struct kvm_s390_vm_tod_clock gtod;
1186
1187 memset(&gtod, 0, sizeof(gtod));
David Hildenbrand33d1b272018-04-27 14:36:13 +02001188 kvm_s390_get_tod_clock(kvm, &gtod);
Collin L. Walling8fa16962016-07-26 15:29:44 -04001189 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1190 return -EFAULT;
1191
1192 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
1193 gtod.epoch_idx, gtod.tod);
1194 return 0;
1195}
1196
Jason J. Herne72f25022014-11-25 09:46:02 -05001197static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
1198{
1199 u8 gtod_high = 0;
1200
1201 if (copy_to_user((void __user *)attr->addr, &gtod_high,
1202 sizeof(gtod_high)))
1203 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001204 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
Jason J. Herne72f25022014-11-25 09:46:02 -05001205
1206 return 0;
1207}
1208
1209static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
1210{
David Hildenbrand5a3d8832015-09-29 16:27:24 +02001211 u64 gtod;
Jason J. Herne72f25022014-11-25 09:46:02 -05001212
David Hildenbrand60417fc2015-09-29 16:20:36 +02001213 gtod = kvm_s390_get_tod_clock_fast(kvm);
Jason J. Herne72f25022014-11-25 09:46:02 -05001214 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
1215 return -EFAULT;
Christian Borntraeger58c383c2015-10-12 13:27:29 +02001216 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
Jason J. Herne72f25022014-11-25 09:46:02 -05001217
1218 return 0;
1219}
1220
1221static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
1222{
1223 int ret;
1224
1225 if (attr->flags)
1226 return -EINVAL;
1227
1228 switch (attr->attr) {
Collin L. Walling8fa16962016-07-26 15:29:44 -04001229 case KVM_S390_VM_TOD_EXT:
1230 ret = kvm_s390_get_tod_ext(kvm, attr);
1231 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001232 case KVM_S390_VM_TOD_HIGH:
1233 ret = kvm_s390_get_tod_high(kvm, attr);
1234 break;
1235 case KVM_S390_VM_TOD_LOW:
1236 ret = kvm_s390_get_tod_low(kvm, attr);
1237 break;
1238 default:
1239 ret = -ENXIO;
1240 break;
1241 }
1242 return ret;
1243}
1244
Michael Mueller658b6ed2015-02-02 15:49:35 +01001245static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1246{
1247 struct kvm_s390_vm_cpu_processor *proc;
David Hildenbrand053dd232016-04-04 13:59:42 +02001248 u16 lowest_ibc, unblocked_ibc;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001249 int ret = 0;
1250
1251 mutex_lock(&kvm->lock);
Paolo Bonzinia03825b2016-06-13 14:50:04 +02001252 if (kvm->created_vcpus) {
Michael Mueller658b6ed2015-02-02 15:49:35 +01001253 ret = -EBUSY;
1254 goto out;
1255 }
1256 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1257 if (!proc) {
1258 ret = -ENOMEM;
1259 goto out;
1260 }
1261 if (!copy_from_user(proc, (void __user *)attr->addr,
1262 sizeof(*proc))) {
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001263 kvm->arch.model.cpuid = proc->cpuid;
David Hildenbrand053dd232016-04-04 13:59:42 +02001264 lowest_ibc = sclp.ibc >> 16 & 0xfff;
1265 unblocked_ibc = sclp.ibc & 0xfff;
David Hildenbrand0487c442016-06-10 09:22:31 +02001266 if (lowest_ibc && proc->ibc) {
David Hildenbrand053dd232016-04-04 13:59:42 +02001267 if (proc->ibc > unblocked_ibc)
1268 kvm->arch.model.ibc = unblocked_ibc;
1269 else if (proc->ibc < lowest_ibc)
1270 kvm->arch.model.ibc = lowest_ibc;
1271 else
1272 kvm->arch.model.ibc = proc->ibc;
1273 }
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001274 memcpy(kvm->arch.model.fac_list, proc->fac_list,
Michael Mueller658b6ed2015-02-02 15:49:35 +01001275 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001276 VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1277 kvm->arch.model.ibc,
1278 kvm->arch.model.cpuid);
1279 VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1280 kvm->arch.model.fac_list[0],
1281 kvm->arch.model.fac_list[1],
1282 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001283 } else
1284 ret = -EFAULT;
1285 kfree(proc);
1286out:
1287 mutex_unlock(&kvm->lock);
1288 return ret;
1289}
1290
David Hildenbrand15c97052015-03-19 17:36:43 +01001291static int kvm_s390_set_processor_feat(struct kvm *kvm,
1292 struct kvm_device_attr *attr)
1293{
1294 struct kvm_s390_vm_cpu_feat data;
David Hildenbrand15c97052015-03-19 17:36:43 +01001295
1296 if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
1297 return -EFAULT;
1298 if (!bitmap_subset((unsigned long *) data.feat,
1299 kvm_s390_available_cpu_feat,
1300 KVM_S390_VM_CPU_FEAT_NR_BITS))
1301 return -EINVAL;
1302
1303 mutex_lock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001304 if (kvm->created_vcpus) {
1305 mutex_unlock(&kvm->lock);
1306 return -EBUSY;
David Hildenbrand15c97052015-03-19 17:36:43 +01001307 }
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001308 bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
1309 KVM_S390_VM_CPU_FEAT_NR_BITS);
David Hildenbrand15c97052015-03-19 17:36:43 +01001310 mutex_unlock(&kvm->lock);
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001311 VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1312 data.feat[0],
1313 data.feat[1],
1314 data.feat[2]);
1315 return 0;
David Hildenbrand15c97052015-03-19 17:36:43 +01001316}
1317
David Hildenbrand0a763c72016-05-18 16:03:47 +02001318static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1319 struct kvm_device_attr *attr)
1320{
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001321 mutex_lock(&kvm->lock);
1322 if (kvm->created_vcpus) {
1323 mutex_unlock(&kvm->lock);
1324 return -EBUSY;
1325 }
1326
1327 if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
1328 sizeof(struct kvm_s390_vm_cpu_subfunc))) {
1329 mutex_unlock(&kvm->lock);
1330 return -EFAULT;
1331 }
1332 mutex_unlock(&kvm->lock);
1333
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001334 VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1335 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1336 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1337 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1338 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1339 VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1340 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1341 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1342 VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1343 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1344 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1345 VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1346 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1347 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1348 VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
1349 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1350 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1351 VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1352 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1353 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1354 VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1355 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1356 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1357 VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1358 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1359 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1360 VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1361 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1362 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1363 VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1364 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1365 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1366 VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1367 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1368 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1369 VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1370 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1371 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1372 VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1373 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1374 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1375 VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1376 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1377 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001378 VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1379 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1380 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001381 VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1382 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1383 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1384 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1385 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001386 VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1387 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1388 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1389 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1390 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001391
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001392 return 0;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001393}
1394
Michael Mueller658b6ed2015-02-02 15:49:35 +01001395static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1396{
1397 int ret = -ENXIO;
1398
1399 switch (attr->attr) {
1400 case KVM_S390_VM_CPU_PROCESSOR:
1401 ret = kvm_s390_set_processor(kvm, attr);
1402 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001403 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1404 ret = kvm_s390_set_processor_feat(kvm, attr);
1405 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001406 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1407 ret = kvm_s390_set_processor_subfunc(kvm, attr);
1408 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001409 }
1410 return ret;
1411}
1412
1413static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
1414{
1415 struct kvm_s390_vm_cpu_processor *proc;
1416 int ret = 0;
1417
1418 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
1419 if (!proc) {
1420 ret = -ENOMEM;
1421 goto out;
1422 }
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02001423 proc->cpuid = kvm->arch.model.cpuid;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001424 proc->ibc = kvm->arch.model.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001425 memcpy(&proc->fac_list, kvm->arch.model.fac_list,
1426 S390_ARCH_FAC_LIST_SIZE_BYTE);
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001427 VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
1428 kvm->arch.model.ibc,
1429 kvm->arch.model.cpuid);
1430 VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1431 kvm->arch.model.fac_list[0],
1432 kvm->arch.model.fac_list[1],
1433 kvm->arch.model.fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001434 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
1435 ret = -EFAULT;
1436 kfree(proc);
1437out:
1438 return ret;
1439}
1440
1441static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
1442{
1443 struct kvm_s390_vm_cpu_machine *mach;
1444 int ret = 0;
1445
1446 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
1447 if (!mach) {
1448 ret = -ENOMEM;
1449 goto out;
1450 }
1451 get_cpu_id((struct cpuid *) &mach->cpuid);
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02001452 mach->ibc = sclp.ibc;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01001453 memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
Michael Mueller981467c2015-02-24 13:51:04 +01001454 S390_ARCH_FAC_LIST_SIZE_BYTE);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001455 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
Christian Borntraeger04478192017-01-12 16:25:15 +01001456 sizeof(S390_lowcore.stfle_fac_list));
Christian Borntraegera8c39dd2017-01-18 16:01:02 +01001457 VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
1458 kvm->arch.model.ibc,
1459 kvm->arch.model.cpuid);
1460 VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
1461 mach->fac_mask[0],
1462 mach->fac_mask[1],
1463 mach->fac_mask[2]);
1464 VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
1465 mach->fac_list[0],
1466 mach->fac_list[1],
1467 mach->fac_list[2]);
Michael Mueller658b6ed2015-02-02 15:49:35 +01001468 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
1469 ret = -EFAULT;
1470 kfree(mach);
1471out:
1472 return ret;
1473}
1474
David Hildenbrand15c97052015-03-19 17:36:43 +01001475static int kvm_s390_get_processor_feat(struct kvm *kvm,
1476 struct kvm_device_attr *attr)
1477{
1478 struct kvm_s390_vm_cpu_feat data;
1479
1480 bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
1481 KVM_S390_VM_CPU_FEAT_NR_BITS);
1482 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1483 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001484 VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1485 data.feat[0],
1486 data.feat[1],
1487 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001488 return 0;
1489}
1490
1491static int kvm_s390_get_machine_feat(struct kvm *kvm,
1492 struct kvm_device_attr *attr)
1493{
1494 struct kvm_s390_vm_cpu_feat data;
1495
1496 bitmap_copy((unsigned long *) data.feat,
1497 kvm_s390_available_cpu_feat,
1498 KVM_S390_VM_CPU_FEAT_NR_BITS);
1499 if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
1500 return -EFAULT;
Christian Borntraeger2f8311c2017-11-16 12:30:15 +01001501 VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
1502 data.feat[0],
1503 data.feat[1],
1504 data.feat[2]);
David Hildenbrand15c97052015-03-19 17:36:43 +01001505 return 0;
1506}
1507
David Hildenbrand0a763c72016-05-18 16:03:47 +02001508static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1509 struct kvm_device_attr *attr)
1510{
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001511 if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
1512 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1513 return -EFAULT;
1514
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001515 VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1516 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
1517 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
1518 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
1519 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
1520 VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
1521 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
1522 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
1523 VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
1524 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
1525 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
1526 VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
1527 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
1528 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
1529 VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
1530 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
1531 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
1532 VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
1533 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
1534 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
1535 VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
1536 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
1537 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
1538 VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
1539 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
1540 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
1541 VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
1542 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
1543 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
1544 VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
1545 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
1546 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
1547 VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
1548 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
1549 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
1550 VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
1551 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
1552 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
1553 VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
1554 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
1555 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
1556 VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
1557 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
1558 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001559 VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
1560 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
1561 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001562 VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1563 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
1564 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
1565 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
1566 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001567 VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1568 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
1569 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
1570 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
1571 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001572
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001573 return 0;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001574}
1575
1576static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
1577 struct kvm_device_attr *attr)
1578{
1579 if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1580 sizeof(struct kvm_s390_vm_cpu_subfunc)))
1581 return -EFAULT;
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001582
1583 VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1584 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
1585 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
1586 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
1587 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
1588 VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
1589 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
1590 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
1591 VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
1592 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
1593 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
1594 VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
1595 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
1596 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
1597 VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
1598 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
1599 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
1600 VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
1601 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
1602 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
1603 VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
1604 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
1605 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
1606 VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
1607 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
1608 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
1609 VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
1610 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
1611 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
1612 VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
1613 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
1614 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
1615 VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
1616 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
1617 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
1618 VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
1619 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
1620 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
1621 VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
1622 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
1623 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
1624 VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
1625 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
1626 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
Christian Borntraeger13209ad2018-12-28 09:33:35 +01001627 VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
1628 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
1629 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
Christian Borntraeger173aec22018-12-28 10:59:06 +01001630 VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1631 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
1632 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
1633 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
1634 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
Christian Borntraeger4f45b902018-12-28 10:46:04 +01001635 VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
1636 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
1637 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
1638 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
1639 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
Christian Borntraeger11ba5962019-02-20 11:38:42 -05001640
David Hildenbrand0a763c72016-05-18 16:03:47 +02001641 return 0;
1642}
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001643
Michael Mueller658b6ed2015-02-02 15:49:35 +01001644static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1645{
1646 int ret = -ENXIO;
1647
1648 switch (attr->attr) {
1649 case KVM_S390_VM_CPU_PROCESSOR:
1650 ret = kvm_s390_get_processor(kvm, attr);
1651 break;
1652 case KVM_S390_VM_CPU_MACHINE:
1653 ret = kvm_s390_get_machine(kvm, attr);
1654 break;
David Hildenbrand15c97052015-03-19 17:36:43 +01001655 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1656 ret = kvm_s390_get_processor_feat(kvm, attr);
1657 break;
1658 case KVM_S390_VM_CPU_MACHINE_FEAT:
1659 ret = kvm_s390_get_machine_feat(kvm, attr);
1660 break;
David Hildenbrand0a763c72016-05-18 16:03:47 +02001661 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1662 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1663 break;
1664 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1665 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1666 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001667 }
1668 return ret;
1669}
1670
Dominik Dingelf2061652014-04-09 13:13:00 +02001671static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1672{
1673 int ret;
1674
1675 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001676 case KVM_S390_VM_MEM_CTRL:
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001677 ret = kvm_s390_set_mem_control(kvm, attr);
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001678 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001679 case KVM_S390_VM_TOD:
1680 ret = kvm_s390_set_tod(kvm, attr);
1681 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001682 case KVM_S390_VM_CPU_MODEL:
1683 ret = kvm_s390_set_cpu_model(kvm, attr);
1684 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001685 case KVM_S390_VM_CRYPTO:
1686 ret = kvm_s390_vm_set_crypto(kvm, attr);
1687 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001688 case KVM_S390_VM_MIGRATION:
1689 ret = kvm_s390_vm_set_migration(kvm, attr);
1690 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001691 default:
1692 ret = -ENXIO;
1693 break;
1694 }
1695
1696 return ret;
1697}
1698
1699static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1700{
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001701 int ret;
1702
1703 switch (attr->group) {
1704 case KVM_S390_VM_MEM_CTRL:
1705 ret = kvm_s390_get_mem_control(kvm, attr);
1706 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001707 case KVM_S390_VM_TOD:
1708 ret = kvm_s390_get_tod(kvm, attr);
1709 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001710 case KVM_S390_VM_CPU_MODEL:
1711 ret = kvm_s390_get_cpu_model(kvm, attr);
1712 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001713 case KVM_S390_VM_MIGRATION:
1714 ret = kvm_s390_vm_get_migration(kvm, attr);
1715 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001716 default:
1717 ret = -ENXIO;
1718 break;
1719 }
1720
1721 return ret;
Dominik Dingelf2061652014-04-09 13:13:00 +02001722}
1723
1724static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1725{
1726 int ret;
1727
1728 switch (attr->group) {
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001729 case KVM_S390_VM_MEM_CTRL:
1730 switch (attr->attr) {
1731 case KVM_S390_VM_MEM_ENABLE_CMMA:
1732 case KVM_S390_VM_MEM_CLR_CMMA:
David Hildenbrandf9cbd9b2016-03-03 09:48:47 +01001733 ret = sclp.has_cmma ? 0 : -ENXIO;
1734 break;
Dominik Dingel8c0a7ce2014-10-31 14:10:41 +01001735 case KVM_S390_VM_MEM_LIMIT_SIZE:
Dominik Dingel4f718ea2014-04-09 13:13:00 +02001736 ret = 0;
1737 break;
1738 default:
1739 ret = -ENXIO;
1740 break;
1741 }
1742 break;
Jason J. Herne72f25022014-11-25 09:46:02 -05001743 case KVM_S390_VM_TOD:
1744 switch (attr->attr) {
1745 case KVM_S390_VM_TOD_LOW:
1746 case KVM_S390_VM_TOD_HIGH:
1747 ret = 0;
1748 break;
1749 default:
1750 ret = -ENXIO;
1751 break;
1752 }
1753 break;
Michael Mueller658b6ed2015-02-02 15:49:35 +01001754 case KVM_S390_VM_CPU_MODEL:
1755 switch (attr->attr) {
1756 case KVM_S390_VM_CPU_PROCESSOR:
1757 case KVM_S390_VM_CPU_MACHINE:
David Hildenbrand15c97052015-03-19 17:36:43 +01001758 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1759 case KVM_S390_VM_CPU_MACHINE_FEAT:
David Hildenbrand0a763c72016-05-18 16:03:47 +02001760 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05001761 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
Michael Mueller658b6ed2015-02-02 15:49:35 +01001762 ret = 0;
1763 break;
1764 default:
1765 ret = -ENXIO;
1766 break;
1767 }
1768 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001769 case KVM_S390_VM_CRYPTO:
1770 switch (attr->attr) {
1771 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1772 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1773 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1774 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1775 ret = 0;
1776 break;
Tony Krowiak37940fb2018-09-25 19:16:39 -04001777 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1778 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1779 ret = ap_instructions_available() ? 0 : -ENXIO;
1780 break;
Tony Krowiaka374e892014-09-03 10:13:53 +02001781 default:
1782 ret = -ENXIO;
1783 break;
1784 }
1785 break;
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02001786 case KVM_S390_VM_MIGRATION:
1787 ret = 0;
1788 break;
Dominik Dingelf2061652014-04-09 13:13:00 +02001789 default:
1790 ret = -ENXIO;
1791 break;
1792 }
1793
1794 return ret;
1795}
1796
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001797static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1798{
1799 uint8_t *keys;
1800 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001801 int srcu_idx, i, r = 0;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001802
1803 if (args->flags != 0)
1804 return -EINVAL;
1805
1806 /* Is this guest using storage keys? */
Janosch Frank55531b72018-02-15 16:33:47 +01001807 if (!mm_uses_skeys(current->mm))
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001808 return KVM_S390_GET_SKEYS_NONE;
1809
1810 /* Enforce sane limit on memory allocation */
1811 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1812 return -EINVAL;
1813
Michal Hocko752ade62017-05-08 15:57:27 -07001814 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001815 if (!keys)
1816 return -ENOMEM;
1817
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001818 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001819 srcu_idx = srcu_read_lock(&kvm->srcu);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001820 for (i = 0; i < args->count; i++) {
1821 hva = gfn_to_hva(kvm, args->start_gfn + i);
1822 if (kvm_is_error_hva(hva)) {
1823 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001824 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001825 }
1826
David Hildenbrand154c8c12016-05-09 11:22:34 +02001827 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1828 if (r)
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001829 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001830 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001831 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001832 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001833
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001834 if (!r) {
1835 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1836 sizeof(uint8_t) * args->count);
1837 if (r)
1838 r = -EFAULT;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001839 }
1840
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001841 kvfree(keys);
1842 return r;
1843}
1844
1845static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1846{
1847 uint8_t *keys;
1848 uint64_t hva;
Christian Borntraeger4f899142017-07-10 13:35:48 +02001849 int srcu_idx, i, r = 0;
Janosch Frankbd096f62018-07-18 13:40:22 +01001850 bool unlocked;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001851
1852 if (args->flags != 0)
1853 return -EINVAL;
1854
1855 /* Enforce sane limit on memory allocation */
1856 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1857 return -EINVAL;
1858
Michal Hocko752ade62017-05-08 15:57:27 -07001859 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001860 if (!keys)
1861 return -ENOMEM;
1862
1863 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1864 sizeof(uint8_t) * args->count);
1865 if (r) {
1866 r = -EFAULT;
1867 goto out;
1868 }
1869
1870 /* Enable storage key handling for the guest */
Dominik Dingel14d4a422015-05-07 15:16:13 +02001871 r = s390_enable_skey();
1872 if (r)
1873 goto out;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001874
Janosch Frankbd096f62018-07-18 13:40:22 +01001875 i = 0;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001876 down_read(&current->mm->mmap_sem);
Christian Borntraeger4f899142017-07-10 13:35:48 +02001877 srcu_idx = srcu_read_lock(&kvm->srcu);
Janosch Frankbd096f62018-07-18 13:40:22 +01001878 while (i < args->count) {
1879 unlocked = false;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001880 hva = gfn_to_hva(kvm, args->start_gfn + i);
1881 if (kvm_is_error_hva(hva)) {
1882 r = -EFAULT;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001883 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001884 }
1885
1886 /* Lowest order bit is reserved */
1887 if (keys[i] & 0x01) {
1888 r = -EINVAL;
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001889 break;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001890 }
1891
David Hildenbrandfe69eab2016-05-09 13:08:07 +02001892 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
Janosch Frankbd096f62018-07-18 13:40:22 +01001893 if (r) {
1894 r = fixup_user_fault(current, current->mm, hva,
1895 FAULT_FLAG_WRITE, &unlocked);
1896 if (r)
1897 break;
1898 }
1899 if (!r)
1900 i++;
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001901 }
Christian Borntraeger4f899142017-07-10 13:35:48 +02001902 srcu_read_unlock(&kvm->srcu, srcu_idx);
Martin Schwidefskyd3ed1ce2016-03-08 11:53:35 +01001903 up_read(&current->mm->mmap_sem);
Jason J. Herne30ee2a92014-09-23 09:23:01 -04001904out:
1905 kvfree(keys);
1906 return r;
1907}
1908
Claudio Imbrenda4036e382016-08-04 17:58:47 +02001909/*
1910 * Base address and length must be sent at the start of each block, therefore
1911 * it's cheaper to send some clean data, as long as it's less than the size of
1912 * two longs.
1913 */
1914#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1915/* for consistency */
1916#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1917
1918/*
Claudio Imbrendaafdad612018-04-30 18:33:25 +02001919 * Similar to gfn_to_memslot, but returns the index of a memslot also when the
1920 * address falls in a hole. In that case the index of one of the memslots
1921 * bordering the hole is returned.
1922 */
1923static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
1924{
1925 int start = 0, end = slots->used_slots;
1926 int slot = atomic_read(&slots->lru_slot);
1927 struct kvm_memory_slot *memslots = slots->memslots;
1928
1929 if (gfn >= memslots[slot].base_gfn &&
1930 gfn < memslots[slot].base_gfn + memslots[slot].npages)
1931 return slot;
1932
1933 while (start < end) {
1934 slot = start + (end - start) / 2;
1935
1936 if (gfn >= memslots[slot].base_gfn)
1937 end = slot;
1938 else
1939 start = slot + 1;
1940 }
1941
1942 if (gfn >= memslots[start].base_gfn &&
1943 gfn < memslots[start].base_gfn + memslots[start].npages) {
1944 atomic_set(&slots->lru_slot, start);
1945 }
1946
1947 return start;
1948}
1949
1950static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1951 u8 *res, unsigned long bufsize)
1952{
1953 unsigned long pgstev, hva, cur_gfn = args->start_gfn;
1954
1955 args->count = 0;
1956 while (args->count < bufsize) {
1957 hva = gfn_to_hva(kvm, cur_gfn);
1958 /*
1959 * We return an error if the first value was invalid, but we
1960 * return successfully if at least one value was copied.
1961 */
1962 if (kvm_is_error_hva(hva))
1963 return args->count ? 0 : -EFAULT;
1964 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
1965 pgstev = 0;
1966 res[args->count++] = (pgstev >> 24) & 0x43;
1967 cur_gfn++;
1968 }
1969
1970 return 0;
1971}
1972
1973static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
1974 unsigned long cur_gfn)
1975{
1976 int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
1977 struct kvm_memory_slot *ms = slots->memslots + slotidx;
1978 unsigned long ofs = cur_gfn - ms->base_gfn;
1979
1980 if (ms->base_gfn + ms->npages <= cur_gfn) {
1981 slotidx--;
1982 /* If we are above the highest slot, wrap around */
1983 if (slotidx < 0)
1984 slotidx = slots->used_slots - 1;
1985
1986 ms = slots->memslots + slotidx;
1987 ofs = 0;
1988 }
1989 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
1990 while ((slotidx > 0) && (ofs >= ms->npages)) {
1991 slotidx--;
1992 ms = slots->memslots + slotidx;
1993 ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
1994 }
1995 return ms->base_gfn + ofs;
1996}
1997
1998static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
1999 u8 *res, unsigned long bufsize)
2000{
2001 unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
2002 struct kvm_memslots *slots = kvm_memslots(kvm);
2003 struct kvm_memory_slot *ms;
2004
Sean Christopherson0774a962020-03-20 13:55:40 -07002005 if (unlikely(!slots->used_slots))
2006 return 0;
2007
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002008 cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
2009 ms = gfn_to_memslot(kvm, cur_gfn);
2010 args->count = 0;
2011 args->start_gfn = cur_gfn;
2012 if (!ms)
2013 return 0;
2014 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2015 mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
2016
2017 while (args->count < bufsize) {
2018 hva = gfn_to_hva(kvm, cur_gfn);
2019 if (kvm_is_error_hva(hva))
2020 return 0;
2021 /* Decrement only if we actually flipped the bit to 0 */
2022 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
2023 atomic64_dec(&kvm->arch.cmma_dirty_pages);
2024 if (get_pgste(kvm->mm, hva, &pgstev) < 0)
2025 pgstev = 0;
2026 /* Save the value */
2027 res[args->count++] = (pgstev >> 24) & 0x43;
2028 /* If the next bit is too far away, stop. */
2029 if (next_gfn > cur_gfn + KVM_S390_MAX_BIT_DISTANCE)
2030 return 0;
2031 /* If we reached the previous "next", find the next one */
2032 if (cur_gfn == next_gfn)
2033 next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
2034 /* Reached the end of memory or of the buffer, stop */
2035 if ((next_gfn >= mem_end) ||
2036 (next_gfn - args->start_gfn >= bufsize))
2037 return 0;
2038 cur_gfn++;
2039 /* Reached the end of the current memslot, take the next one. */
2040 if (cur_gfn - ms->base_gfn >= ms->npages) {
2041 ms = gfn_to_memslot(kvm, cur_gfn);
2042 if (!ms)
2043 return 0;
2044 }
2045 }
2046 return 0;
2047}
2048
2049/*
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002050 * This function searches for the next page with dirty CMMA attributes, and
2051 * saves the attributes in the buffer up to either the end of the buffer or
2052 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
2053 * no trailing clean bytes are saved.
2054 * In case no dirty bits were found, or if CMMA was not enabled or used, the
2055 * output buffer will indicate 0 as length.
2056 */
2057static int kvm_s390_get_cmma_bits(struct kvm *kvm,
2058 struct kvm_s390_cmma_log *args)
2059{
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002060 unsigned long bufsize;
2061 int srcu_idx, peek, ret;
2062 u8 *values;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002063
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002064 if (!kvm->arch.use_cmma)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002065 return -ENXIO;
2066 /* Invalid/unsupported flags were specified */
2067 if (args->flags & ~KVM_S390_CMMA_PEEK)
2068 return -EINVAL;
2069 /* Migration mode query, and we are not doing a migration */
2070 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002071 if (!peek && !kvm->arch.migration_mode)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002072 return -EINVAL;
2073 /* CMMA is disabled or was not used, or the buffer has length zero */
2074 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002075 if (!bufsize || !kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002076 memset(args, 0, sizeof(*args));
2077 return 0;
2078 }
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002079 /* We are not peeking, and there are no dirty pages */
2080 if (!peek && !atomic64_read(&kvm->arch.cmma_dirty_pages)) {
2081 memset(args, 0, sizeof(*args));
2082 return 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002083 }
2084
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002085 values = vmalloc(bufsize);
2086 if (!values)
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002087 return -ENOMEM;
2088
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002089 down_read(&kvm->mm->mmap_sem);
2090 srcu_idx = srcu_read_lock(&kvm->srcu);
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002091 if (peek)
2092 ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
2093 else
2094 ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002095 srcu_read_unlock(&kvm->srcu, srcu_idx);
2096 up_read(&kvm->mm->mmap_sem);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002097
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002098 if (kvm->arch.migration_mode)
2099 args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
2100 else
2101 args->remaining = 0;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002102
Claudio Imbrendaafdad612018-04-30 18:33:25 +02002103 if (copy_to_user((void __user *)args->values, values, args->count))
2104 ret = -EFAULT;
2105
2106 vfree(values);
2107 return ret;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002108}
2109
2110/*
2111 * This function sets the CMMA attributes for the given pages. If the input
2112 * buffer has zero length, no action is taken, otherwise the attributes are
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002113 * set and the mm->context.uses_cmm flag is set.
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002114 */
2115static int kvm_s390_set_cmma_bits(struct kvm *kvm,
2116 const struct kvm_s390_cmma_log *args)
2117{
2118 unsigned long hva, mask, pgstev, i;
2119 uint8_t *bits;
2120 int srcu_idx, r = 0;
2121
2122 mask = args->mask;
2123
2124 if (!kvm->arch.use_cmma)
2125 return -ENXIO;
2126 /* invalid/unsupported flags */
2127 if (args->flags != 0)
2128 return -EINVAL;
2129 /* Enforce sane limit on memory allocation */
2130 if (args->count > KVM_S390_CMMA_SIZE_MAX)
2131 return -EINVAL;
2132 /* Nothing to do */
2133 if (args->count == 0)
2134 return 0;
2135
Kees Cook42bc47b2018-06-12 14:27:11 -07002136 bits = vmalloc(array_size(sizeof(*bits), args->count));
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002137 if (!bits)
2138 return -ENOMEM;
2139
2140 r = copy_from_user(bits, (void __user *)args->values, args->count);
2141 if (r) {
2142 r = -EFAULT;
2143 goto out;
2144 }
2145
2146 down_read(&kvm->mm->mmap_sem);
2147 srcu_idx = srcu_read_lock(&kvm->srcu);
2148 for (i = 0; i < args->count; i++) {
2149 hva = gfn_to_hva(kvm, args->start_gfn + i);
2150 if (kvm_is_error_hva(hva)) {
2151 r = -EFAULT;
2152 break;
2153 }
2154
2155 pgstev = bits[i];
2156 pgstev = pgstev << 24;
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002157 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002158 set_pgste_bits(kvm->mm, hva, mask, pgstev);
2159 }
2160 srcu_read_unlock(&kvm->srcu, srcu_idx);
2161 up_read(&kvm->mm->mmap_sem);
2162
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002163 if (!kvm->mm->context.uses_cmm) {
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002164 down_write(&kvm->mm->mmap_sem);
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002165 kvm->mm->context.uses_cmm = 1;
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002166 up_write(&kvm->mm->mmap_sem);
2167 }
2168out:
2169 vfree(bits);
2170 return r;
2171}
2172
Janosch Frank29b40f12019-09-30 04:19:18 -04002173static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
2174{
2175 struct kvm_vcpu *vcpu;
2176 u16 rc, rrc;
2177 int ret = 0;
2178 int i;
2179
2180 /*
2181 * We ignore failures and try to destroy as many CPUs as possible.
2182 * At the same time we must not free the assigned resources when
2183 * this fails, as the ultravisor has still access to that memory.
2184 * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
2185 * behind.
2186 * We want to return the first failure rc and rrc, though.
2187 */
2188 kvm_for_each_vcpu(i, vcpu, kvm) {
2189 mutex_lock(&vcpu->mutex);
2190 if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
2191 *rcp = rc;
2192 *rrcp = rrc;
2193 ret = -EIO;
2194 }
2195 mutex_unlock(&vcpu->mutex);
2196 }
2197 return ret;
2198}
2199
2200static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
2201{
2202 int i, r = 0;
2203 u16 dummy;
2204
2205 struct kvm_vcpu *vcpu;
2206
2207 kvm_for_each_vcpu(i, vcpu, kvm) {
2208 mutex_lock(&vcpu->mutex);
2209 r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
2210 mutex_unlock(&vcpu->mutex);
2211 if (r)
2212 break;
2213 }
2214 if (r)
2215 kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
2216 return r;
2217}
2218
2219static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
2220{
2221 int r = 0;
2222 u16 dummy;
2223 void __user *argp = (void __user *)cmd->data;
2224
2225 switch (cmd->cmd) {
2226 case KVM_PV_ENABLE: {
2227 r = -EINVAL;
2228 if (kvm_s390_pv_is_protected(kvm))
2229 break;
2230
2231 /*
2232 * FMT 4 SIE needs esca. As we never switch back to bsca from
2233 * esca, we need no cleanup in the error cases below
2234 */
2235 r = sca_switch_to_extended(kvm);
2236 if (r)
2237 break;
2238
Janosch Frankfa0c5ea2019-07-16 13:08:37 +02002239 down_write(&current->mm->mmap_sem);
2240 r = gmap_mark_unmergeable();
2241 up_write(&current->mm->mmap_sem);
2242 if (r)
2243 break;
2244
Janosch Frank29b40f12019-09-30 04:19:18 -04002245 r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
2246 if (r)
2247 break;
2248
2249 r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
2250 if (r)
2251 kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
Christian Borntraeger0890dde2020-02-03 09:13:37 +01002252
2253 /* we need to block service interrupts from now on */
2254 set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
Janosch Frank29b40f12019-09-30 04:19:18 -04002255 break;
2256 }
2257 case KVM_PV_DISABLE: {
2258 r = -EINVAL;
2259 if (!kvm_s390_pv_is_protected(kvm))
2260 break;
2261
2262 r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
2263 /*
2264 * If a CPU could not be destroyed, destroy VM will also fail.
2265 * There is no point in trying to destroy it. Instead return
2266 * the rc and rrc from the first CPU that failed destroying.
2267 */
2268 if (r)
2269 break;
2270 r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
Christian Borntraeger0890dde2020-02-03 09:13:37 +01002271
2272 /* no need to block service interrupts any more */
2273 clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
Janosch Frank29b40f12019-09-30 04:19:18 -04002274 break;
2275 }
2276 case KVM_PV_SET_SEC_PARMS: {
2277 struct kvm_s390_pv_sec_parm parms = {};
2278 void *hdr;
2279
2280 r = -EINVAL;
2281 if (!kvm_s390_pv_is_protected(kvm))
2282 break;
2283
2284 r = -EFAULT;
2285 if (copy_from_user(&parms, argp, sizeof(parms)))
2286 break;
2287
2288 /* Currently restricted to 8KB */
2289 r = -EINVAL;
2290 if (parms.length > PAGE_SIZE * 2)
2291 break;
2292
2293 r = -ENOMEM;
2294 hdr = vmalloc(parms.length);
2295 if (!hdr)
2296 break;
2297
2298 r = -EFAULT;
2299 if (!copy_from_user(hdr, (void __user *)parms.origin,
2300 parms.length))
2301 r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
2302 &cmd->rc, &cmd->rrc);
2303
2304 vfree(hdr);
2305 break;
2306 }
2307 case KVM_PV_UNPACK: {
2308 struct kvm_s390_pv_unp unp = {};
2309
2310 r = -EINVAL;
2311 if (!kvm_s390_pv_is_protected(kvm))
2312 break;
2313
2314 r = -EFAULT;
2315 if (copy_from_user(&unp, argp, sizeof(unp)))
2316 break;
2317
2318 r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
2319 &cmd->rc, &cmd->rrc);
2320 break;
2321 }
2322 case KVM_PV_VERIFY: {
2323 r = -EINVAL;
2324 if (!kvm_s390_pv_is_protected(kvm))
2325 break;
2326
2327 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2328 UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
2329 KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
2330 cmd->rrc);
2331 break;
2332 }
Janosch Franke0d27732019-05-09 13:07:21 +02002333 case KVM_PV_PREP_RESET: {
2334 r = -EINVAL;
2335 if (!kvm_s390_pv_is_protected(kvm))
2336 break;
2337
2338 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2339 UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc);
2340 KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x",
2341 cmd->rc, cmd->rrc);
2342 break;
2343 }
2344 case KVM_PV_UNSHARE_ALL: {
2345 r = -EINVAL;
2346 if (!kvm_s390_pv_is_protected(kvm))
2347 break;
2348
2349 r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
2350 UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc);
2351 KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x",
2352 cmd->rc, cmd->rrc);
2353 break;
2354 }
Janosch Frank29b40f12019-09-30 04:19:18 -04002355 default:
2356 r = -ENOTTY;
2357 }
2358 return r;
2359}
2360
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002361long kvm_arch_vm_ioctl(struct file *filp,
2362 unsigned int ioctl, unsigned long arg)
2363{
2364 struct kvm *kvm = filp->private_data;
2365 void __user *argp = (void __user *)arg;
Dominik Dingelf2061652014-04-09 13:13:00 +02002366 struct kvm_device_attr attr;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002367 int r;
2368
2369 switch (ioctl) {
Carsten Otteba5c1e92008-03-25 18:47:26 +01002370 case KVM_S390_INTERRUPT: {
2371 struct kvm_s390_interrupt s390int;
2372
2373 r = -EFAULT;
2374 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2375 break;
2376 r = kvm_s390_inject_vm(kvm, &s390int);
2377 break;
2378 }
Cornelia Huck84223592013-07-15 13:36:01 +02002379 case KVM_CREATE_IRQCHIP: {
2380 struct kvm_irq_routing_entry routing;
2381
2382 r = -EINVAL;
2383 if (kvm->arch.use_irqchip) {
2384 /* Set up dummy routing. */
2385 memset(&routing, 0, sizeof(routing));
Nicholas Krause152b2832015-08-06 13:05:54 -04002386 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
Cornelia Huck84223592013-07-15 13:36:01 +02002387 }
2388 break;
2389 }
Dominik Dingelf2061652014-04-09 13:13:00 +02002390 case KVM_SET_DEVICE_ATTR: {
2391 r = -EFAULT;
2392 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2393 break;
2394 r = kvm_s390_vm_set_attr(kvm, &attr);
2395 break;
2396 }
2397 case KVM_GET_DEVICE_ATTR: {
2398 r = -EFAULT;
2399 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2400 break;
2401 r = kvm_s390_vm_get_attr(kvm, &attr);
2402 break;
2403 }
2404 case KVM_HAS_DEVICE_ATTR: {
2405 r = -EFAULT;
2406 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2407 break;
2408 r = kvm_s390_vm_has_attr(kvm, &attr);
2409 break;
2410 }
Jason J. Herne30ee2a92014-09-23 09:23:01 -04002411 case KVM_S390_GET_SKEYS: {
2412 struct kvm_s390_skeys args;
2413
2414 r = -EFAULT;
2415 if (copy_from_user(&args, argp,
2416 sizeof(struct kvm_s390_skeys)))
2417 break;
2418 r = kvm_s390_get_skeys(kvm, &args);
2419 break;
2420 }
2421 case KVM_S390_SET_SKEYS: {
2422 struct kvm_s390_skeys args;
2423
2424 r = -EFAULT;
2425 if (copy_from_user(&args, argp,
2426 sizeof(struct kvm_s390_skeys)))
2427 break;
2428 r = kvm_s390_set_skeys(kvm, &args);
2429 break;
2430 }
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002431 case KVM_S390_GET_CMMA_BITS: {
2432 struct kvm_s390_cmma_log args;
2433
2434 r = -EFAULT;
2435 if (copy_from_user(&args, argp, sizeof(args)))
2436 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002437 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002438 r = kvm_s390_get_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002439 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002440 if (!r) {
2441 r = copy_to_user(argp, &args, sizeof(args));
2442 if (r)
2443 r = -EFAULT;
2444 }
2445 break;
2446 }
2447 case KVM_S390_SET_CMMA_BITS: {
2448 struct kvm_s390_cmma_log args;
2449
2450 r = -EFAULT;
2451 if (copy_from_user(&args, argp, sizeof(args)))
2452 break;
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002453 mutex_lock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002454 r = kvm_s390_set_cmma_bits(kvm, &args);
Christian Borntraeger1de1ea72017-12-22 10:54:20 +01002455 mutex_unlock(&kvm->slots_lock);
Claudio Imbrenda4036e382016-08-04 17:58:47 +02002456 break;
2457 }
Janosch Frank29b40f12019-09-30 04:19:18 -04002458 case KVM_S390_PV_COMMAND: {
2459 struct kvm_pv_cmd args;
2460
Janosch Frankfe28c7862019-05-15 13:24:30 +02002461 /* protvirt means user sigp */
2462 kvm->arch.user_cpu_state_ctrl = 1;
Janosch Frank29b40f12019-09-30 04:19:18 -04002463 r = 0;
2464 if (!is_prot_virt_host()) {
2465 r = -EINVAL;
2466 break;
2467 }
2468 if (copy_from_user(&args, argp, sizeof(args))) {
2469 r = -EFAULT;
2470 break;
2471 }
2472 if (args.flags) {
2473 r = -EINVAL;
2474 break;
2475 }
2476 mutex_lock(&kvm->lock);
2477 r = kvm_s390_handle_pv(kvm, &args);
2478 mutex_unlock(&kvm->lock);
2479 if (copy_to_user(argp, &args, sizeof(args))) {
2480 r = -EFAULT;
2481 break;
2482 }
2483 break;
2484 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002485 default:
Avi Kivity367e1312009-08-26 14:57:07 +03002486 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002487 }
2488
2489 return r;
2490}
2491
Tony Krowiak45c9b472015-01-13 11:33:26 -05002492static int kvm_s390_apxa_installed(void)
2493{
Tony Krowiake585b242018-09-25 19:16:18 -04002494 struct ap_config_info info;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002495
Tony Krowiake585b242018-09-25 19:16:18 -04002496 if (ap_instructions_available()) {
2497 if (ap_qci(&info) == 0)
2498 return info.apxa;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002499 }
2500
2501 return 0;
2502}
2503
Tony Krowiake585b242018-09-25 19:16:18 -04002504/*
2505 * The format of the crypto control block (CRYCB) is specified in the 3 low
2506 * order bits of the CRYCB designation (CRYCBD) field as follows:
2507 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
2508 * AP extended addressing (APXA) facility are installed.
2509 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
2510 * Format 2: Both the APXA and MSAX3 facilities are installed
2511 */
Tony Krowiak45c9b472015-01-13 11:33:26 -05002512static void kvm_s390_set_crycb_format(struct kvm *kvm)
2513{
2514 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2515
Tony Krowiake585b242018-09-25 19:16:18 -04002516 /* Clear the CRYCB format bits - i.e., set format 0 by default */
2517 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2518
2519 /* Check whether MSAX3 is installed */
2520 if (!test_kvm_facility(kvm, 76))
2521 return;
2522
Tony Krowiak45c9b472015-01-13 11:33:26 -05002523 if (kvm_s390_apxa_installed())
2524 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2525 else
2526 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2527}
2528
Pierre Morel0e237e42018-10-05 10:31:09 +02002529void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
2530 unsigned long *aqm, unsigned long *adm)
2531{
2532 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2533
2534 mutex_lock(&kvm->lock);
2535 kvm_s390_vcpu_block_all(kvm);
2536
2537 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2538 case CRYCB_FORMAT2: /* APCB1 use 256 bits */
2539 memcpy(crycb->apcb1.apm, apm, 32);
2540 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
2541 apm[0], apm[1], apm[2], apm[3]);
2542 memcpy(crycb->apcb1.aqm, aqm, 32);
2543 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
2544 aqm[0], aqm[1], aqm[2], aqm[3]);
2545 memcpy(crycb->apcb1.adm, adm, 32);
2546 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
2547 adm[0], adm[1], adm[2], adm[3]);
2548 break;
2549 case CRYCB_FORMAT1:
2550 case CRYCB_FORMAT0: /* Fall through both use APCB0 */
2551 memcpy(crycb->apcb0.apm, apm, 8);
2552 memcpy(crycb->apcb0.aqm, aqm, 2);
2553 memcpy(crycb->apcb0.adm, adm, 2);
2554 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
2555 apm[0], *((unsigned short *)aqm),
2556 *((unsigned short *)adm));
2557 break;
2558 default: /* Can not happen */
2559 break;
2560 }
2561
2562 /* recreate the shadow crycb for each vcpu */
2563 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2564 kvm_s390_vcpu_unblock_all(kvm);
2565 mutex_unlock(&kvm->lock);
2566}
2567EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
2568
Tony Krowiak421045982018-09-25 19:16:25 -04002569void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2570{
2571 mutex_lock(&kvm->lock);
2572 kvm_s390_vcpu_block_all(kvm);
2573
2574 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2575 sizeof(kvm->arch.crypto.crycb->apcb0));
2576 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2577 sizeof(kvm->arch.crypto.crycb->apcb1));
2578
Pierre Morel0e237e42018-10-05 10:31:09 +02002579 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
Pierre Morel6cc571b2018-09-25 19:16:30 -04002580 /* recreate the shadow crycb for each vcpu */
2581 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
Tony Krowiak421045982018-09-25 19:16:25 -04002582 kvm_s390_vcpu_unblock_all(kvm);
2583 mutex_unlock(&kvm->lock);
2584}
2585EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2586
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002587static u64 kvm_s390_get_initial_cpuid(void)
Michael Mueller9d8d5782015-02-02 15:42:51 +01002588{
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002589 struct cpuid cpuid;
2590
2591 get_cpu_id(&cpuid);
2592 cpuid.version = 0xff;
2593 return *((u64 *) &cpuid);
Michael Mueller9d8d5782015-02-02 15:42:51 +01002594}
2595
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002596static void kvm_s390_crypto_init(struct kvm *kvm)
Tony Krowiak5102ee82014-06-27 14:46:01 -04002597{
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002598 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
Tony Krowiak45c9b472015-01-13 11:33:26 -05002599 kvm_s390_set_crycb_format(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002600
Tony Krowiake585b242018-09-25 19:16:18 -04002601 if (!test_kvm_facility(kvm, 76))
2602 return;
2603
Tony Krowiaked6f76b2015-02-24 14:06:57 -05002604 /* Enable AES/DEA protected key functions by default */
2605 kvm->arch.crypto.aes_kw = 1;
2606 kvm->arch.crypto.dea_kw = 1;
2607 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2608 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2609 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2610 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
Tony Krowiak5102ee82014-06-27 14:46:01 -04002611}
2612
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002613static void sca_dispose(struct kvm *kvm)
2614{
2615 if (kvm->arch.use_esca)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002616 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002617 else
2618 free_page((unsigned long)(kvm->arch.sca));
2619 kvm->arch.sca = NULL;
2620}
2621
Carsten Ottee08b9632012-01-04 10:25:20 +01002622int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002623{
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002624 gfp_t alloc_flags = GFP_KERNEL;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002625 int i, rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002626 char debug_name[16];
Christian Borntraegerf6c137f2014-03-19 11:18:29 +01002627 static unsigned long sca_offset;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002628
Carsten Ottee08b9632012-01-04 10:25:20 +01002629 rc = -EINVAL;
2630#ifdef CONFIG_KVM_S390_UCONTROL
2631 if (type & ~KVM_VM_S390_UCONTROL)
2632 goto out_err;
2633 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2634 goto out_err;
2635#else
2636 if (type)
2637 goto out_err;
2638#endif
2639
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002640 rc = s390_enable_sie();
2641 if (rc)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002642 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002643
Carsten Otteb2904112011-10-18 12:27:13 +02002644 rc = -ENOMEM;
2645
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002646 if (!sclp.has_64bscao)
2647 alloc_flags |= GFP_DMA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002648 rwlock_init(&kvm->arch.sca_lock);
David Hildenbrand9ac96d72018-04-27 14:36:12 +02002649 /* start with basic SCA */
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002650 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002651 if (!kvm->arch.sca)
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002652 goto out_err;
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002653 mutex_lock(&kvm_lock);
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002654 sca_offset += 16;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002655 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
David Hildenbrandc5c2c392015-10-26 08:41:29 +01002656 sca_offset = 0;
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002657 kvm->arch.sca = (struct bsca_block *)
2658 ((char *) kvm->arch.sca + sca_offset);
Junaid Shahid0d9ce162019-01-03 17:14:28 -08002659 mutex_unlock(&kvm_lock);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002660
2661 sprintf(debug_name, "kvm-%u", current->pid);
2662
Christian Borntraeger1cb9cf72015-07-20 15:04:48 +02002663 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002664 if (!kvm->arch.dbf)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002665 goto out_err;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002666
Michael Mueller19114be2017-05-30 14:26:02 +02002667 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002668 kvm->arch.sie_page2 =
2669 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2670 if (!kvm->arch.sie_page2)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002671 goto out_err;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002672
Michael Mueller25c84db2019-01-31 09:52:41 +01002673 kvm->arch.sie_page2->kvm = kvm;
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002674 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00002675
2676 for (i = 0; i < kvm_s390_fac_size(); i++) {
2677 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2678 (kvm_s390_fac_base[i] |
2679 kvm_s390_fac_ext[i]);
2680 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2681 kvm_s390_fac_base[i];
2682 }
Christian Borntraeger346fa2f2019-02-18 07:48:25 -05002683 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
Michael Mueller981467c2015-02-24 13:51:04 +01002684
David Hildenbrand19352222017-08-29 16:31:08 +02002685 /* we are always in czam mode - even on pre z14 machines */
2686 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2687 set_kvm_facility(kvm->arch.model.fac_list, 138);
2688 /* we emulate STHYI in kvm */
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002689 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2690 set_kvm_facility(kvm->arch.model.fac_list, 74);
Claudio Imbrenda1bab1c02016-08-29 15:56:55 +02002691 if (MACHINE_HAS_TLB_GUEST) {
2692 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2693 set_kvm_facility(kvm->arch.model.fac_list, 147);
2694 }
Janosch Frank95ca2cb2016-05-23 15:11:58 +02002695
Pierre Morel05f31e32019-05-21 17:34:37 +02002696 if (css_general_characteristics.aiv && test_facility(65))
2697 set_kvm_facility(kvm->arch.model.fac_mask, 65);
2698
David Hildenbrand9bb0ec02016-04-04 14:27:51 +02002699 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02002700 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
Michael Mueller9d8d5782015-02-02 15:42:51 +01002701
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002702 kvm_s390_crypto_init(kvm);
Tony Krowiak5102ee82014-06-27 14:46:01 -04002703
Fei Li51978392017-02-17 17:06:26 +08002704 mutex_init(&kvm->arch.float_int.ais_lock);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002705 spin_lock_init(&kvm->arch.float_int.lock);
Jens Freimann6d3da242013-07-03 15:18:35 +02002706 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2707 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
Heiko Carstens8a2422342014-01-10 14:33:28 +01002708 init_waitqueue_head(&kvm->arch.ipte_wq);
Thomas Hutha6b7e452014-10-01 14:48:42 +02002709 mutex_init(&kvm->arch.ipte_mutex);
Carsten Otteba5c1e92008-03-25 18:47:26 +01002710
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002711 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002712 VM_EVENT(kvm, 3, "vm created with type %lu", type);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002713
Carsten Ottee08b9632012-01-04 10:25:20 +01002714 if (type & KVM_VM_S390_UCONTROL) {
2715 kvm->arch.gmap = NULL;
Dominik Dingela3a92c32014-12-01 17:24:42 +01002716 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
Carsten Ottee08b9632012-01-04 10:25:20 +01002717 } else {
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002718 if (sclp.hamax == U64_MAX)
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002719 kvm->arch.mem_limit = TASK_SIZE_MAX;
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002720 else
Martin Schwidefskyee71d162017-04-20 14:43:51 +02002721 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
Guenther Hutzl32e6b232014-12-01 17:24:42 +01002722 sclp.hamax + 1);
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002723 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
Carsten Ottee08b9632012-01-04 10:25:20 +01002724 if (!kvm->arch.gmap)
Dominik Dingel40f5b732015-03-12 13:55:53 +01002725 goto out_err;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02002726 kvm->arch.gmap->private = kvm;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02002727 kvm->arch.gmap->pfault_enabled = 0;
Carsten Ottee08b9632012-01-04 10:25:20 +01002728 }
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01002729
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01002730 kvm->arch.use_pfmfi = sclp.has_pfmfi;
Janosch Frank55531b72018-02-15 16:33:47 +01002731 kvm->arch.use_skf = sclp.has_skey;
David Hildenbrand8ad35752014-03-14 11:00:21 +01002732 spin_lock_init(&kvm->arch.start_stop_lock);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002733 kvm_s390_vsie_init(kvm);
Michael Muellercc674ef2020-02-27 10:10:31 +01002734 if (use_gisa)
2735 kvm_s390_gisa_init(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002736 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
David Hildenbrand8ad35752014-03-14 11:00:21 +01002737
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002738 return 0;
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002739out_err:
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002740 free_page((unsigned long)kvm->arch.sie_page2);
Dominik Dingel40f5b732015-03-12 13:55:53 +01002741 debug_unregister(kvm->arch.dbf);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002742 sca_dispose(kvm);
Christian Borntraeger78f26132015-07-22 15:50:58 +02002743 KVM_EVENT(3, "creation of vm failed: %d", rc);
Jan Kiszkad89f5ef2010-11-09 17:02:49 +01002744 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002745}
2746
Christian Borntraegerd329c032008-11-26 14:50:27 +01002747void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2748{
Janosch Frank29b40f12019-09-30 04:19:18 -04002749 u16 rc, rrc;
2750
Christian Borntraegerd329c032008-11-26 14:50:27 +01002751 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
Cornelia Huckade38c32012-07-23 17:20:30 +02002752 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002753 kvm_s390_clear_local_irqs(vcpu);
Dominik Dingel3c038e62013-10-07 17:11:48 +02002754 kvm_clear_async_pf_completion_queue(vcpu);
Eugene (jno) Dvurechenskibc784cc2015-04-23 16:09:06 +02002755 if (!kvm_is_ucontrol(vcpu->kvm))
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002756 sca_del_vcpu(vcpu);
Carsten Otte27e03932012-01-04 10:25:21 +01002757
2758 if (kvm_is_ucontrol(vcpu->kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002759 gmap_remove(vcpu->arch.gmap);
Carsten Otte27e03932012-01-04 10:25:21 +01002760
Dominik Dingele6db1d62015-05-07 15:41:57 +02002761 if (vcpu->kvm->arch.use_cmma)
Dominik Dingelb31605c2014-03-25 13:47:11 +01002762 kvm_s390_vcpu_unsetup_cmma(vcpu);
Janosch Frank29b40f12019-09-30 04:19:18 -04002763 /* We can not hold the vcpu mutex here, we are already dying */
2764 if (kvm_s390_pv_cpu_get_handle(vcpu))
2765 kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002766 free_page((unsigned long)(vcpu->arch.sie_block));
Christian Borntraegerd329c032008-11-26 14:50:27 +01002767}
2768
2769static void kvm_free_vcpus(struct kvm *kvm)
2770{
2771 unsigned int i;
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002772 struct kvm_vcpu *vcpu;
Christian Borntraegerd329c032008-11-26 14:50:27 +01002773
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002774 kvm_for_each_vcpu(i, vcpu, kvm)
Sean Christopherson4543bdc2019-12-18 13:55:14 -08002775 kvm_vcpu_destroy(vcpu);
Gleb Natapov988a2ca2009-06-09 15:56:29 +03002776
2777 mutex_lock(&kvm->lock);
2778 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2779 kvm->vcpus[i] = NULL;
2780
2781 atomic_set(&kvm->online_vcpus, 0);
2782 mutex_unlock(&kvm->lock);
Christian Borntraegerd329c032008-11-26 14:50:27 +01002783}
2784
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002785void kvm_arch_destroy_vm(struct kvm *kvm)
2786{
Janosch Frank29b40f12019-09-30 04:19:18 -04002787 u16 rc, rrc;
2788
Christian Borntraegerd329c032008-11-26 14:50:27 +01002789 kvm_free_vcpus(kvm);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002790 sca_dispose(kvm);
Michael Muellerd7c5cb02017-06-12 14:15:19 +02002791 kvm_s390_gisa_destroy(kvm);
Janosch Frank29b40f12019-09-30 04:19:18 -04002792 /*
2793 * We are already at the end of life and kvm->lock is not taken.
2794 * This is ok as the file descriptor is closed by now and nobody
2795 * can mess with the pv state. To avoid lockdep_assert_held from
2796 * complaining we do not use kvm_s390_pv_is_protected.
2797 */
2798 if (kvm_s390_pv_get_handle(kvm))
2799 kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
2800 debug_unregister(kvm->arch.dbf);
David Hildenbrandc54f0d62015-12-02 08:53:52 +01002801 free_page((unsigned long)kvm->arch.sie_page2);
Carsten Otte27e03932012-01-04 10:25:21 +01002802 if (!kvm_is_ucontrol(kvm))
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002803 gmap_remove(kvm->arch.gmap);
Cornelia Huck841b91c2013-07-15 13:36:01 +02002804 kvm_s390_destroy_adapters(kvm);
Christian Borntraeger67335e62014-03-25 17:09:08 +01002805 kvm_s390_clear_float_irqs(kvm);
David Hildenbranda3508fb2015-07-08 13:19:48 +02002806 kvm_s390_vsie_destroy(kvm);
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002807 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01002808}
2809
2810/* Section: vcpu related */
Dominik Dingeldafd0322014-12-02 16:53:21 +01002811static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2812{
Martin Schwidefsky6ea427b2016-03-08 11:55:04 +01002813 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
Dominik Dingeldafd0322014-12-02 16:53:21 +01002814 if (!vcpu->arch.gmap)
2815 return -ENOMEM;
2816 vcpu->arch.gmap->private = vcpu->kvm;
2817
2818 return 0;
2819}
2820
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002821static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2822{
David Hildenbranda6940672016-08-08 22:39:32 +02002823 if (!kvm_s390_use_sca_entries())
2824 return;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002825 read_lock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002826 if (vcpu->kvm->arch.use_esca) {
2827 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002828
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002829 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002830 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002831 } else {
2832 struct bsca_block *sca = vcpu->kvm->arch.sca;
2833
2834 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
David Hildenbrand10ce32d2015-10-12 12:41:41 +02002835 sca->cpu[vcpu->vcpu_id].sda = 0;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002836 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002837 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002838}
2839
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002840static void sca_add_vcpu(struct kvm_vcpu *vcpu)
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002841{
David Hildenbranda6940672016-08-08 22:39:32 +02002842 if (!kvm_s390_use_sca_entries()) {
2843 struct bsca_block *sca = vcpu->kvm->arch.sca;
2844
2845 /* we still need the basic sca for the ipte control */
2846 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2847 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandf07afa02018-03-06 14:27:58 +01002848 return;
David Hildenbranda6940672016-08-08 22:39:32 +02002849 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002850 read_lock(&vcpu->kvm->arch.sca_lock);
2851 if (vcpu->kvm->arch.use_esca) {
2852 struct esca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002853
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002854 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002855 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2856 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002857 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002858 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002859 } else {
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002860 struct bsca_block *sca = vcpu->kvm->arch.sca;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002861
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002862 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002863 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2864 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002865 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
Eugene (jno) Dvurechenski7d43bafcf2015-04-22 17:09:44 +02002866 }
David Hildenbrandeaa78f32015-10-12 16:29:01 +02002867 read_unlock(&vcpu->kvm->arch.sca_lock);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002868}
2869
2870/* Basic SCA to Extended SCA data copy routines */
2871static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2872{
2873 d->sda = s->sda;
2874 d->sigp_ctrl.c = s->sigp_ctrl.c;
2875 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2876}
2877
2878static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2879{
2880 int i;
2881
2882 d->ipte_control = s->ipte_control;
2883 d->mcn[0] = s->mcn;
2884 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2885 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2886}
2887
2888static int sca_switch_to_extended(struct kvm *kvm)
2889{
2890 struct bsca_block *old_sca = kvm->arch.sca;
2891 struct esca_block *new_sca;
2892 struct kvm_vcpu *vcpu;
2893 unsigned int vcpu_idx;
2894 u32 scaol, scaoh;
2895
Janosch Frank29b40f12019-09-30 04:19:18 -04002896 if (kvm->arch.use_esca)
2897 return 0;
2898
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002899 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2900 if (!new_sca)
2901 return -ENOMEM;
2902
2903 scaoh = (u32)((u64)(new_sca) >> 32);
2904 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2905
2906 kvm_s390_vcpu_block_all(kvm);
2907 write_lock(&kvm->arch.sca_lock);
2908
2909 sca_copy_b_to_e(new_sca, old_sca);
2910
2911 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2912 vcpu->arch.sie_block->scaoh = scaoh;
2913 vcpu->arch.sie_block->scaol = scaol;
David Hildenbrand0c9d8682017-03-13 11:48:28 +01002914 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002915 }
2916 kvm->arch.sca = new_sca;
2917 kvm->arch.use_esca = 1;
2918
2919 write_unlock(&kvm->arch.sca_lock);
2920 kvm_s390_vcpu_unblock_all(kvm);
2921
2922 free_page((unsigned long)old_sca);
2923
Christian Borntraeger8335713a2015-12-08 16:55:27 +01002924 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2925 old_sca, kvm->arch.sca);
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002926 return 0;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002927}
2928
2929static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2930{
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002931 int rc;
2932
David Hildenbranda6940672016-08-08 22:39:32 +02002933 if (!kvm_s390_use_sca_entries()) {
2934 if (id < KVM_MAX_VCPUS)
2935 return true;
2936 return false;
2937 }
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002938 if (id < KVM_S390_BSCA_CPU_SLOTS)
2939 return true;
David Hildenbrand76a6dd72015-11-24 13:33:49 +01002940 if (!sclp.has_esca || !sclp.has_64bscao)
Eugene (jno) Dvurechenski5e044312015-04-22 18:08:39 +02002941 return false;
2942
2943 mutex_lock(&kvm->lock);
2944 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2945 mutex_unlock(&kvm->lock);
2946
2947 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
Eugene (jno) Dvurechenskia6e2f682015-04-21 15:31:59 +02002948}
2949
David Hildenbranddb0758b2016-02-15 09:42:25 +01002950/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2951static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2952{
2953 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002954 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002955 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand9c23a132016-02-17 21:53:33 +01002956 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002957}
2958
2959/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2960static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2961{
2962 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
David Hildenbrand9c23a132016-02-17 21:53:33 +01002963 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002964 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2965 vcpu->arch.cputm_start = 0;
David Hildenbrand9c23a132016-02-17 21:53:33 +01002966 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01002967}
2968
2969/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2970static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2971{
2972 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2973 vcpu->arch.cputm_enabled = true;
2974 __start_cpu_timer_accounting(vcpu);
2975}
2976
2977/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2978static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2979{
2980 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2981 __stop_cpu_timer_accounting(vcpu);
2982 vcpu->arch.cputm_enabled = false;
2983}
2984
2985static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2986{
2987 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2988 __enable_cpu_timer_accounting(vcpu);
2989 preempt_enable();
2990}
2991
2992static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2993{
2994 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2995 __disable_cpu_timer_accounting(vcpu);
2996 preempt_enable();
2997}
2998
David Hildenbrand4287f242016-02-15 09:40:12 +01002999/* set the cpu timer - may only be called from the VCPU thread itself */
3000void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
3001{
David Hildenbranddb0758b2016-02-15 09:42:25 +01003002 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
David Hildenbrand9c23a132016-02-17 21:53:33 +01003003 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003004 if (vcpu->arch.cputm_enabled)
3005 vcpu->arch.cputm_start = get_tod_clock_fast();
David Hildenbrand4287f242016-02-15 09:40:12 +01003006 vcpu->arch.sie_block->cputm = cputm;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003007 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
David Hildenbranddb0758b2016-02-15 09:42:25 +01003008 preempt_enable();
David Hildenbrand4287f242016-02-15 09:40:12 +01003009}
3010
David Hildenbranddb0758b2016-02-15 09:42:25 +01003011/* update and get the cpu timer - can also be called from other VCPU threads */
David Hildenbrand4287f242016-02-15 09:40:12 +01003012__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
3013{
David Hildenbrand9c23a132016-02-17 21:53:33 +01003014 unsigned int seq;
David Hildenbranddb0758b2016-02-15 09:42:25 +01003015 __u64 value;
David Hildenbranddb0758b2016-02-15 09:42:25 +01003016
3017 if (unlikely(!vcpu->arch.cputm_enabled))
3018 return vcpu->arch.sie_block->cputm;
3019
David Hildenbrand9c23a132016-02-17 21:53:33 +01003020 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
3021 do {
3022 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
3023 /*
3024 * If the writer would ever execute a read in the critical
3025 * section, e.g. in irq context, we have a deadlock.
3026 */
3027 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
3028 value = vcpu->arch.sie_block->cputm;
3029 /* if cputm_start is 0, accounting is being started/stopped */
3030 if (likely(vcpu->arch.cputm_start))
3031 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
3032 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
3033 preempt_enable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01003034 return value;
David Hildenbrand4287f242016-02-15 09:40:12 +01003035}
3036
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003037void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
3038{
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003039
David Hildenbrand37d9df92015-03-11 16:47:33 +01003040 gmap_enable(vcpu->arch.enabled_gmap);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003041 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand5ebda312016-02-22 13:52:27 +01003042 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003043 __start_cpu_timer_accounting(vcpu);
David Hildenbrand01a745a2016-02-12 20:41:56 +01003044 vcpu->cpu = cpu;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003045}
3046
3047void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
3048{
David Hildenbrand01a745a2016-02-12 20:41:56 +01003049 vcpu->cpu = -1;
David Hildenbrand5ebda312016-02-22 13:52:27 +01003050 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
David Hildenbranddb0758b2016-02-15 09:42:25 +01003051 __stop_cpu_timer_accounting(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003052 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
David Hildenbrand37d9df92015-03-11 16:47:33 +01003053 vcpu->arch.enabled_gmap = gmap_get_enabled();
3054 gmap_disable(vcpu->arch.enabled_gmap);
Hendrik Brueckner9977e882015-06-10 12:53:42 +02003055
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003056}
3057
Dominik Dingel31928aa2014-12-04 15:47:07 +01003058void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003059{
Jason J. Herne72f25022014-11-25 09:46:02 -05003060 mutex_lock(&vcpu->kvm->lock);
Fan Zhangfdf03652015-05-13 10:58:41 +02003061 preempt_disable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003062 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
David Hildenbrandd16b52c2018-02-07 12:46:44 +01003063 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
Fan Zhangfdf03652015-05-13 10:58:41 +02003064 preempt_enable();
Jason J. Herne72f25022014-11-25 09:46:02 -05003065 mutex_unlock(&vcpu->kvm->lock);
David Hildenbrand25508822015-10-12 16:27:23 +02003066 if (!kvm_is_ucontrol(vcpu->kvm)) {
Dominik Dingeldafd0322014-12-02 16:53:21 +01003067 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
David Hildenbrandeaa78f32015-10-12 16:29:01 +02003068 sca_add_vcpu(vcpu);
David Hildenbrand25508822015-10-12 16:27:23 +02003069 }
David Hildenbrand6502a342016-06-21 14:19:51 +02003070 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
3071 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
David Hildenbrand37d9df92015-03-11 16:47:33 +01003072 /* make vcpu_load load the right gmap on the first trigger */
3073 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
Marcelo Tosatti42897d82012-11-27 23:29:02 -02003074}
3075
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003076static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
3077{
3078 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
3079 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
3080 return true;
3081 return false;
3082}
3083
3084static bool kvm_has_pckmo_ecc(struct kvm *kvm)
3085{
3086 /* At least one ECC subfunction must be present */
3087 return kvm_has_pckmo_subfunc(kvm, 32) ||
3088 kvm_has_pckmo_subfunc(kvm, 33) ||
3089 kvm_has_pckmo_subfunc(kvm, 34) ||
3090 kvm_has_pckmo_subfunc(kvm, 40) ||
3091 kvm_has_pckmo_subfunc(kvm, 41);
3092
3093}
3094
Tony Krowiak5102ee82014-06-27 14:46:01 -04003095static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
3096{
Tony Krowiake585b242018-09-25 19:16:18 -04003097 /*
3098 * If the AP instructions are not being interpreted and the MSAX3
3099 * facility is not configured for the guest, there is nothing to set up.
3100 */
3101 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
Tony Krowiak5102ee82014-06-27 14:46:01 -04003102 return;
3103
Tony Krowiake585b242018-09-25 19:16:18 -04003104 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
Tony Krowiaka374e892014-09-03 10:13:53 +02003105 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
Tony Krowiak37940fb2018-09-25 19:16:39 -04003106 vcpu->arch.sie_block->eca &= ~ECA_APIE;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003107 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
Tony Krowiaka374e892014-09-03 10:13:53 +02003108
Tony Krowiake585b242018-09-25 19:16:18 -04003109 if (vcpu->kvm->arch.crypto.apie)
3110 vcpu->arch.sie_block->eca |= ECA_APIE;
3111
3112 /* Set up protected key support */
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003113 if (vcpu->kvm->arch.crypto.aes_kw) {
Tony Krowiaka374e892014-09-03 10:13:53 +02003114 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
Christian Borntraeger8ec2fa52019-04-03 03:00:35 -04003115 /* ecc is also wrapped with AES key */
3116 if (kvm_has_pckmo_ecc(vcpu->kvm))
3117 vcpu->arch.sie_block->ecd |= ECD_ECC;
3118 }
3119
Tony Krowiaka374e892014-09-03 10:13:53 +02003120 if (vcpu->kvm->arch.crypto.dea_kw)
3121 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
Tony Krowiak5102ee82014-06-27 14:46:01 -04003122}
3123
Dominik Dingelb31605c2014-03-25 13:47:11 +01003124void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
3125{
3126 free_page(vcpu->arch.sie_block->cbrlo);
3127 vcpu->arch.sie_block->cbrlo = 0;
3128}
3129
3130int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
3131{
3132 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
3133 if (!vcpu->arch.sie_block->cbrlo)
3134 return -ENOMEM;
Dominik Dingelb31605c2014-03-25 13:47:11 +01003135 return 0;
3136}
3137
Michael Mueller91520f12015-02-27 14:32:11 +01003138static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
3139{
3140 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
3141
Michael Mueller91520f12015-02-27 14:32:11 +01003142 vcpu->arch.sie_block->ibc = model->ibc;
David Hildenbrand80bc79d2015-12-02 09:43:29 +01003143 if (test_kvm_facility(vcpu->kvm, 7))
David Hildenbrandc54f0d62015-12-02 08:53:52 +01003144 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
Michael Mueller91520f12015-02-27 14:32:11 +01003145}
3146
Sean Christophersonff72bb52019-12-18 13:55:20 -08003147static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
3148{
Dominik Dingelb31605c2014-03-25 13:47:11 +01003149 int rc = 0;
Janosch Frank29b40f12019-09-30 04:19:18 -04003150 u16 uvrc, uvrrc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003151
Cornelia Huck9e6dabe2011-11-17 11:00:41 +01003152 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
3153 CPUSTAT_SM |
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003154 CPUSTAT_STOPPED);
3155
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003156 if (test_kvm_facility(vcpu->kvm, 78))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003157 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
Guenther Hutzl53df84f2015-02-18 11:13:03 +01003158 else if (test_kvm_facility(vcpu->kvm, 8))
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003159 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
Guenther Hutzla4a4f192015-03-31 14:39:49 +02003160
Michael Mueller91520f12015-02-27 14:32:11 +01003161 kvm_s390_vcpu_setup_model(vcpu);
3162
David Hildenbrandbdab09f2016-04-12 11:07:49 +02003163 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
3164 if (MACHINE_HAS_ESOP)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003165 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
David Hildenbrandbd50e8e2016-03-04 12:23:55 +01003166 if (test_kvm_facility(vcpu->kvm, 9))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003167 vcpu->arch.sie_block->ecb |= ECB_SRSI;
David Hildenbrandf597d242016-04-22 16:26:49 +02003168 if (test_kvm_facility(vcpu->kvm, 73))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003169 vcpu->arch.sie_block->ecb |= ECB_TE;
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003170
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003171 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003172 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
Janosch Frankcd1836f2016-08-04 09:57:36 +02003173 if (test_kvm_facility(vcpu->kvm, 130))
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003174 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
3175 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
David Hildenbrand48ee7d32016-04-04 15:49:34 +02003176 if (sclp.has_cei)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003177 vcpu->arch.sie_block->eca |= ECA_CEI;
David Hildenbrand11ad65b2016-04-04 15:46:26 +02003178 if (sclp.has_ib)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003179 vcpu->arch.sie_block->eca |= ECA_IB;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003180 if (sclp.has_siif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003181 vcpu->arch.sie_block->eca |= ECA_SII;
David Hildenbrand37c5f6c2015-05-06 13:18:59 +02003182 if (sclp.has_sigpif)
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003183 vcpu->arch.sie_block->eca |= ECA_SIGPI;
Michael Mueller18280d82015-03-16 16:05:41 +01003184 if (test_kvm_facility(vcpu->kvm, 129)) {
David Hildenbrand0c9d8682017-03-13 11:48:28 +01003185 vcpu->arch.sie_block->eca |= ECA_VX;
3186 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
Eric Farman13211ea2014-04-30 13:39:46 -04003187 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003188 if (test_kvm_facility(vcpu->kvm, 139))
3189 vcpu->arch.sie_block->ecd |= ECD_MEF;
Christian Borntraegera3da7b42018-03-08 16:08:49 +00003190 if (test_kvm_facility(vcpu->kvm, 156))
3191 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
Michael Muellerd7c5cb02017-06-12 14:15:19 +02003192 if (vcpu->arch.sie_block->gd) {
3193 vcpu->arch.sie_block->eca |= ECA_AIV;
3194 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
3195 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
3196 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01003197 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
3198 | SDNXC;
Fan Zhangc6e5f162016-01-07 18:24:29 +08003199 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
Farhan Ali730cd632017-02-24 16:12:56 -05003200
3201 if (sclp.has_kss)
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003202 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
Farhan Ali730cd632017-02-24 16:12:56 -05003203 else
3204 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
Matthew Rosato5a5e6532013-01-29 11:48:20 -05003205
Dominik Dingele6db1d62015-05-07 15:41:57 +02003206 if (vcpu->kvm->arch.use_cmma) {
Dominik Dingelb31605c2014-03-25 13:47:11 +01003207 rc = kvm_s390_vcpu_setup_cmma(vcpu);
3208 if (rc)
3209 return rc;
Konstantin Weitzb31288f2013-04-17 17:36:29 +02003210 }
David Hildenbrand0ac96caf2014-12-12 15:17:31 +01003211 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
Christian Borntraegerca872302009-05-12 17:21:49 +02003212 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
Michael Mueller9d8d5782015-02-02 15:42:51 +01003213
Collin Walling67d49d52018-08-31 12:51:19 -04003214 vcpu->arch.sie_block->hpid = HPID_KVM;
3215
Tony Krowiak5102ee82014-06-27 14:46:01 -04003216 kvm_s390_vcpu_crypto_setup(vcpu);
3217
Janosch Frank29b40f12019-09-30 04:19:18 -04003218 mutex_lock(&vcpu->kvm->lock);
3219 if (kvm_s390_pv_is_protected(vcpu->kvm)) {
3220 rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
3221 if (rc)
3222 kvm_s390_vcpu_unsetup_cmma(vcpu);
3223 }
3224 mutex_unlock(&vcpu->kvm->lock);
3225
Dominik Dingelb31605c2014-03-25 13:47:11 +01003226 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003227}
3228
Sean Christopherson897cc382019-12-18 13:55:09 -08003229int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
3230{
3231 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
3232 return -EINVAL;
3233 return 0;
3234}
3235
Sean Christophersone529ef62019-12-18 13:55:15 -08003236int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003237{
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003238 struct sie_page *sie_page;
Sean Christopherson897cc382019-12-18 13:55:09 -08003239 int rc;
Carsten Otte4d475552011-10-18 12:27:12 +02003240
QingFeng Haoda72ca42017-06-07 11:41:19 +02003241 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003242 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
3243 if (!sie_page)
Sean Christophersone529ef62019-12-18 13:55:15 -08003244 return -ENOMEM;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003245
Michael Mueller7feb6bb2013-06-28 13:30:24 +02003246 vcpu->arch.sie_block = &sie_page->sie_block;
3247 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
3248
David Hildenbrandefed1102015-04-16 12:32:41 +02003249 /* the real guest size will always be smaller than msl */
3250 vcpu->arch.sie_block->mso = 0;
3251 vcpu->arch.sie_block->msl = sclp.hamax;
3252
Sean Christophersone529ef62019-12-18 13:55:15 -08003253 vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
Carsten Otteba5c1e92008-03-25 18:47:26 +01003254 spin_lock_init(&vcpu->arch.local_int.lock);
Sean Christophersone529ef62019-12-18 13:55:15 -08003255 vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
Michael Mueller4b9f9522017-06-23 13:51:25 +02003256 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
3257 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
David Hildenbrand9c23a132016-02-17 21:53:33 +01003258 seqcount_init(&vcpu->arch.cputm_seqcount);
Carsten Otteba5c1e92008-03-25 18:47:26 +01003259
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003260 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3261 kvm_clear_async_pf_completion_queue(vcpu);
3262 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
3263 KVM_SYNC_GPRS |
3264 KVM_SYNC_ACRS |
3265 KVM_SYNC_CRS |
3266 KVM_SYNC_ARCH0 |
3267 KVM_SYNC_PFAULT;
3268 kvm_s390_set_prefix(vcpu, 0);
3269 if (test_kvm_facility(vcpu->kvm, 64))
3270 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
3271 if (test_kvm_facility(vcpu->kvm, 82))
3272 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
3273 if (test_kvm_facility(vcpu->kvm, 133))
3274 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
3275 if (test_kvm_facility(vcpu->kvm, 156))
3276 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
3277 /* fprs can be synchronized via vrs, even if the guest has no vx. With
3278 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
3279 */
3280 if (MACHINE_HAS_VX)
3281 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
3282 else
3283 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
3284
3285 if (kvm_is_ucontrol(vcpu->kvm)) {
3286 rc = __kvm_ucontrol_vcpu_init(vcpu);
3287 if (rc)
Sean Christophersona2017f12019-12-18 13:55:11 -08003288 goto out_free_sie_block;
Sean Christopherson321f8ee2019-12-18 13:55:10 -08003289 }
3290
Sean Christophersone529ef62019-12-18 13:55:15 -08003291 VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
3292 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
3293 trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003294
Sean Christophersonff72bb52019-12-18 13:55:20 -08003295 rc = kvm_s390_vcpu_setup(vcpu);
3296 if (rc)
3297 goto out_ucontrol_uninit;
Sean Christophersone529ef62019-12-18 13:55:15 -08003298 return 0;
3299
Sean Christophersonff72bb52019-12-18 13:55:20 -08003300out_ucontrol_uninit:
3301 if (kvm_is_ucontrol(vcpu->kvm))
3302 gmap_remove(vcpu->arch.gmap);
Wei Yongjun7b06bf22010-03-09 14:37:53 +08003303out_free_sie_block:
3304 free_page((unsigned long)(vcpu->arch.sie_block));
Sean Christophersone529ef62019-12-18 13:55:15 -08003305 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003306}
3307
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003308int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
3309{
David Hildenbrand9a022062014-08-05 17:40:47 +02003310 return kvm_s390_vcpu_has_irq(vcpu, 0);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003311}
3312
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003313bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
3314{
Longpeng(Mike)0546c632017-08-08 12:05:34 +08003315 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
Longpeng(Mike)199b5762017-08-08 12:05:32 +08003316}
3317
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003318void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003319{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003320 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003321 exit_sie(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003322}
3323
Christian Borntraeger27406cd2015-04-14 12:17:34 +02003324void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003325{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003326 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003327}
3328
Christian Borntraeger8e236542015-04-09 13:49:04 +02003329static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
3330{
Peter Zijlstra805de8f42015-04-24 01:12:32 +02003331 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
David Hildenbrand61a6df52015-05-12 08:41:40 +02003332 exit_sie(vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003333}
3334
David Hildenbrand9ea59722018-09-25 19:16:16 -04003335bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
3336{
3337 return atomic_read(&vcpu->arch.sie_block->prog20) &
3338 (PROG_BLOCK_SIE | PROG_REQUEST);
3339}
3340
Christian Borntraeger8e236542015-04-09 13:49:04 +02003341static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
3342{
Jason J. Herne9bf9fde2015-09-16 09:13:50 -04003343 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003344}
3345
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003346/*
David Hildenbrand9ea59722018-09-25 19:16:16 -04003347 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003348 * If the CPU is not running (e.g. waiting as idle) the function will
3349 * return immediately. */
3350void exit_sie(struct kvm_vcpu *vcpu)
3351{
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003352 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
David Hildenbrand9ea59722018-09-25 19:16:16 -04003353 kvm_s390_vsie_kick(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003354 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
3355 cpu_relax();
3356}
3357
Christian Borntraeger8e236542015-04-09 13:49:04 +02003358/* Kick a guest cpu out of SIE to process a request synchronously */
3359void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003360{
Christian Borntraeger8e236542015-04-09 13:49:04 +02003361 kvm_make_request(req, vcpu);
3362 kvm_s390_vcpu_request(vcpu);
Christian Borntraeger49b99e12013-05-17 14:41:35 +02003363}
3364
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003365static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
3366 unsigned long end)
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003367{
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003368 struct kvm *kvm = gmap->private;
3369 struct kvm_vcpu *vcpu;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003370 unsigned long prefix;
3371 int i;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003372
David Hildenbrand65d0b0d2015-04-27 16:29:34 +02003373 if (gmap_is_shadow(gmap))
3374 return;
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003375 if (start >= 1UL << 31)
3376 /* We are only interested in prefix pages */
3377 return;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003378 kvm_for_each_vcpu(i, vcpu, kvm) {
3379 /* match against both prefix pages */
Martin Schwidefsky414d3b02016-03-08 11:52:54 +01003380 prefix = kvm_s390_get_prefix(vcpu);
3381 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3382 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3383 start, end);
Christian Borntraeger8e236542015-04-09 13:49:04 +02003384 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003385 }
3386 }
3387}
3388
Christian Borntraeger8b905d22019-03-05 05:30:02 -05003389bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
3390{
3391 /* do not poll with more than halt_poll_max_steal percent of steal time */
3392 if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
3393 halt_poll_max_steal) {
3394 vcpu->stat.halt_no_poll_steal++;
3395 return true;
3396 }
3397 return false;
3398}
3399
Christoffer Dallb6d33832012-03-08 16:44:24 -05003400int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3401{
3402 /* kvm common code refers to this, but never calls it */
3403 BUG();
3404 return 0;
3405}
3406
Carsten Otte14eebd92012-05-15 14:15:26 +02003407static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3408 struct kvm_one_reg *reg)
3409{
3410 int r = -EINVAL;
3411
3412 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003413 case KVM_REG_S390_TODPR:
3414 r = put_user(vcpu->arch.sie_block->todpr,
3415 (u32 __user *)reg->addr);
3416 break;
3417 case KVM_REG_S390_EPOCHDIFF:
3418 r = put_user(vcpu->arch.sie_block->epoch,
3419 (u64 __user *)reg->addr);
3420 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003421 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003422 r = put_user(kvm_s390_get_cpu_timer(vcpu),
Jason J. herne46a6dd12012-05-15 14:15:28 +02003423 (u64 __user *)reg->addr);
3424 break;
3425 case KVM_REG_S390_CLOCK_COMP:
3426 r = put_user(vcpu->arch.sie_block->ckc,
3427 (u64 __user *)reg->addr);
3428 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003429 case KVM_REG_S390_PFTOKEN:
3430 r = put_user(vcpu->arch.pfault_token,
3431 (u64 __user *)reg->addr);
3432 break;
3433 case KVM_REG_S390_PFCOMPARE:
3434 r = put_user(vcpu->arch.pfault_compare,
3435 (u64 __user *)reg->addr);
3436 break;
3437 case KVM_REG_S390_PFSELECT:
3438 r = put_user(vcpu->arch.pfault_select,
3439 (u64 __user *)reg->addr);
3440 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003441 case KVM_REG_S390_PP:
3442 r = put_user(vcpu->arch.sie_block->pp,
3443 (u64 __user *)reg->addr);
3444 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003445 case KVM_REG_S390_GBEA:
3446 r = put_user(vcpu->arch.sie_block->gbea,
3447 (u64 __user *)reg->addr);
3448 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003449 default:
3450 break;
3451 }
3452
3453 return r;
3454}
3455
3456static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3457 struct kvm_one_reg *reg)
3458{
3459 int r = -EINVAL;
David Hildenbrand4287f242016-02-15 09:40:12 +01003460 __u64 val;
Carsten Otte14eebd92012-05-15 14:15:26 +02003461
3462 switch (reg->id) {
Carsten Otte29b7c712012-05-15 14:15:27 +02003463 case KVM_REG_S390_TODPR:
3464 r = get_user(vcpu->arch.sie_block->todpr,
3465 (u32 __user *)reg->addr);
3466 break;
3467 case KVM_REG_S390_EPOCHDIFF:
3468 r = get_user(vcpu->arch.sie_block->epoch,
3469 (u64 __user *)reg->addr);
3470 break;
Jason J. herne46a6dd12012-05-15 14:15:28 +02003471 case KVM_REG_S390_CPU_TIMER:
David Hildenbrand4287f242016-02-15 09:40:12 +01003472 r = get_user(val, (u64 __user *)reg->addr);
3473 if (!r)
3474 kvm_s390_set_cpu_timer(vcpu, val);
Jason J. herne46a6dd12012-05-15 14:15:28 +02003475 break;
3476 case KVM_REG_S390_CLOCK_COMP:
3477 r = get_user(vcpu->arch.sie_block->ckc,
3478 (u64 __user *)reg->addr);
3479 break;
Dominik Dingel536336c2013-09-30 10:55:33 +02003480 case KVM_REG_S390_PFTOKEN:
3481 r = get_user(vcpu->arch.pfault_token,
3482 (u64 __user *)reg->addr);
David Hildenbrand9fbd8082014-10-09 15:01:38 +02003483 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3484 kvm_clear_async_pf_completion_queue(vcpu);
Dominik Dingel536336c2013-09-30 10:55:33 +02003485 break;
3486 case KVM_REG_S390_PFCOMPARE:
3487 r = get_user(vcpu->arch.pfault_compare,
3488 (u64 __user *)reg->addr);
3489 break;
3490 case KVM_REG_S390_PFSELECT:
3491 r = get_user(vcpu->arch.pfault_select,
3492 (u64 __user *)reg->addr);
3493 break;
Christian Borntraeger672550f2014-02-10 15:32:19 +01003494 case KVM_REG_S390_PP:
3495 r = get_user(vcpu->arch.sie_block->pp,
3496 (u64 __user *)reg->addr);
3497 break;
Christian Borntraegerafa45ff2014-02-10 15:39:23 +01003498 case KVM_REG_S390_GBEA:
3499 r = get_user(vcpu->arch.sie_block->gbea,
3500 (u64 __user *)reg->addr);
3501 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02003502 default:
3503 break;
3504 }
3505
3506 return r;
3507}
Christoffer Dallb6d33832012-03-08 16:44:24 -05003508
Janosch Frank7de3f142020-01-31 05:02:02 -05003509static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003510{
Janosch Frank7de3f142020-01-31 05:02:02 -05003511 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
3512 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
3513 memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
3514
3515 kvm_clear_async_pf_completion_queue(vcpu);
3516 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
3517 kvm_s390_vcpu_stop(vcpu);
3518 kvm_s390_clear_local_irqs(vcpu);
3519}
3520
3521static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3522{
3523 /* Initial reset is a superset of the normal reset */
3524 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
3525
3526 /* this equals initial cpu reset in pop, but we don't switch to ESA */
3527 vcpu->arch.sie_block->gpsw.mask = 0;
3528 vcpu->arch.sie_block->gpsw.addr = 0;
3529 kvm_s390_set_prefix(vcpu, 0);
3530 kvm_s390_set_cpu_timer(vcpu, 0);
3531 vcpu->arch.sie_block->ckc = 0;
Janosch Frank7de3f142020-01-31 05:02:02 -05003532 memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
3533 vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
3534 vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
3535 vcpu->run->s.regs.fpc = 0;
Janosch Frank0f303502020-02-10 04:27:47 -05003536 /*
3537 * Do not reset these registers in the protected case, as some of
3538 * them are overlayed and they are not accessible in this case
3539 * anyway.
3540 */
3541 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
3542 vcpu->arch.sie_block->gbea = 1;
3543 vcpu->arch.sie_block->pp = 0;
3544 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3545 vcpu->arch.sie_block->todpr = 0;
3546 }
Janosch Frank7de3f142020-01-31 05:02:02 -05003547}
3548
3549static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
3550{
3551 struct kvm_sync_regs *regs = &vcpu->run->s.regs;
3552
3553 /* Clear reset is a superset of the initial reset */
3554 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3555
3556 memset(&regs->gprs, 0, sizeof(regs->gprs));
3557 memset(&regs->vrs, 0, sizeof(regs->vrs));
3558 memset(&regs->acrs, 0, sizeof(regs->acrs));
3559 memset(&regs->gscb, 0, sizeof(regs->gscb));
3560
3561 regs->etoken = 0;
3562 regs->etoken_extension = 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003563}
3564
3565int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3566{
Christoffer Dall875656f2017-12-04 21:35:27 +01003567 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003568 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
Christoffer Dall875656f2017-12-04 21:35:27 +01003569 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003570 return 0;
3571}
3572
3573int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3574{
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003575 vcpu_load(vcpu);
Christian Borntraeger5a32c1a2012-01-11 11:20:32 +01003576 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
Christoffer Dall1fc9b762017-12-04 21:35:26 +01003577 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003578 return 0;
3579}
3580
3581int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3582 struct kvm_sregs *sregs)
3583{
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003584 vcpu_load(vcpu);
3585
Christian Borntraeger59674c12012-01-11 11:20:33 +01003586 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003587 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
Christoffer Dallb4ef9d42017-12-04 21:35:29 +01003588
3589 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003590 return 0;
3591}
3592
3593int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3594 struct kvm_sregs *sregs)
3595{
Christoffer Dallbcdec412017-12-04 21:35:28 +01003596 vcpu_load(vcpu);
3597
Christian Borntraeger59674c12012-01-11 11:20:33 +01003598 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003599 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
Christoffer Dallbcdec412017-12-04 21:35:28 +01003600
3601 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003602 return 0;
3603}
3604
3605int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3606{
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003607 int ret = 0;
3608
3609 vcpu_load(vcpu);
3610
3611 if (test_fp_ctl(fpu->fpc)) {
3612 ret = -EINVAL;
3613 goto out;
3614 }
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003615 vcpu->run->s.regs.fpc = fpu->fpc;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003616 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003617 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3618 (freg_t *) fpu->fprs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003619 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003620 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
Christoffer Dall6a96bc72017-12-04 21:35:35 +01003621
3622out:
3623 vcpu_put(vcpu);
3624 return ret;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003625}
3626
3627int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3628{
Christoffer Dall13931232017-12-04 21:35:34 +01003629 vcpu_load(vcpu);
3630
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003631 /* make sure we have the latest values */
3632 save_fpu_regs();
3633 if (MACHINE_HAS_VX)
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003634 convert_vx_to_fp((freg_t *) fpu->fprs,
3635 (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01003636 else
David Hildenbranda7d4b8f2016-08-16 14:38:24 +02003637 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
Christian Borntraegere1788bb2016-11-22 09:29:38 +01003638 fpu->fpc = vcpu->run->s.regs.fpc;
Christoffer Dall13931232017-12-04 21:35:34 +01003639
3640 vcpu_put(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003641 return 0;
3642}
3643
3644static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3645{
3646 int rc = 0;
3647
David Hildenbrand7a42fdc2014-05-05 16:26:19 +02003648 if (!is_vcpu_stopped(vcpu))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003649 rc = -EBUSY;
Carsten Otted7b0b5e2009-11-19 14:21:16 +01003650 else {
3651 vcpu->run->psw_mask = psw.mask;
3652 vcpu->run->psw_addr = psw.addr;
3653 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003654 return rc;
3655}
3656
3657int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3658 struct kvm_translation *tr)
3659{
3660 return -EINVAL; /* not implemented yet */
3661}
3662
David Hildenbrand27291e22014-01-23 12:26:52 +01003663#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3664 KVM_GUESTDBG_USE_HW_BP | \
3665 KVM_GUESTDBG_ENABLE)
3666
Jan Kiszkad0bfb942008-12-15 13:52:10 +01003667int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3668 struct kvm_guest_debug *dbg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003669{
David Hildenbrand27291e22014-01-23 12:26:52 +01003670 int rc = 0;
3671
Christoffer Dall66b56562017-12-04 21:35:33 +01003672 vcpu_load(vcpu);
3673
David Hildenbrand27291e22014-01-23 12:26:52 +01003674 vcpu->guest_debug = 0;
3675 kvm_s390_clear_bp_data(vcpu);
3676
Christoffer Dall66b56562017-12-04 21:35:33 +01003677 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
3678 rc = -EINVAL;
3679 goto out;
3680 }
3681 if (!sclp.has_gpere) {
3682 rc = -EINVAL;
3683 goto out;
3684 }
David Hildenbrand27291e22014-01-23 12:26:52 +01003685
3686 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3687 vcpu->guest_debug = dbg->control;
3688 /* enforce guest PER */
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003689 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003690
3691 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3692 rc = kvm_s390_import_bp_data(vcpu, dbg);
3693 } else {
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003694 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003695 vcpu->arch.guestdbg.last_bp = 0;
3696 }
3697
3698 if (rc) {
3699 vcpu->guest_debug = 0;
3700 kvm_s390_clear_bp_data(vcpu);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003701 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
David Hildenbrand27291e22014-01-23 12:26:52 +01003702 }
3703
Christoffer Dall66b56562017-12-04 21:35:33 +01003704out:
3705 vcpu_put(vcpu);
David Hildenbrand27291e22014-01-23 12:26:52 +01003706 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003707}
3708
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003709int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3710 struct kvm_mp_state *mp_state)
3711{
Christoffer Dallfd232562017-12-04 21:35:30 +01003712 int ret;
3713
3714 vcpu_load(vcpu);
3715
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003716 /* CHECK_STOP and LOAD are not supported yet */
Christoffer Dallfd232562017-12-04 21:35:30 +01003717 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3718 KVM_MP_STATE_OPERATING;
3719
3720 vcpu_put(vcpu);
3721 return ret;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003722}
3723
3724int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3725 struct kvm_mp_state *mp_state)
3726{
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003727 int rc = 0;
3728
Christoffer Dalle83dff52017-12-04 21:35:31 +01003729 vcpu_load(vcpu);
3730
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003731 /* user space knows about this interface - let it control the state */
3732 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3733
3734 switch (mp_state->mp_state) {
3735 case KVM_MP_STATE_STOPPED:
Janosch Frankfe28c7862019-05-15 13:24:30 +02003736 rc = kvm_s390_vcpu_stop(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003737 break;
3738 case KVM_MP_STATE_OPERATING:
Janosch Frankfe28c7862019-05-15 13:24:30 +02003739 rc = kvm_s390_vcpu_start(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003740 break;
3741 case KVM_MP_STATE_LOAD:
Janosch Frank7c36a3f2019-09-02 08:34:44 +02003742 if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
3743 rc = -ENXIO;
3744 break;
3745 }
3746 rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
3747 break;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003748 case KVM_MP_STATE_CHECK_STOP:
Joe Perches3b684a42020-03-10 21:51:32 -07003749 fallthrough; /* CHECK_STOP and LOAD are not supported yet */
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003750 default:
3751 rc = -ENXIO;
3752 }
3753
Christoffer Dalle83dff52017-12-04 21:35:31 +01003754 vcpu_put(vcpu);
David Hildenbrand6352e4d2014-04-10 17:35:00 +02003755 return rc;
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03003756}
3757
David Hildenbrand8ad35752014-03-14 11:00:21 +01003758static bool ibs_enabled(struct kvm_vcpu *vcpu)
3759{
David Hildenbrand8d5fb0d2018-01-23 18:05:31 +01003760 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003761}
3762
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003763static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3764{
David Hildenbrand8ad35752014-03-14 11:00:21 +01003765retry:
Christian Borntraeger8e236542015-04-09 13:49:04 +02003766 kvm_s390_vcpu_request_handled(vcpu);
Radim Krčmář2fa6e1e2017-06-04 14:43:52 +02003767 if (!kvm_request_pending(vcpu))
Christian Borntraeger586b7cc2015-07-28 15:03:05 +02003768 return 0;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003769 /*
3770 * We use MMU_RELOAD just to re-arm the ipte notifier for the
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003771 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003772 * This ensures that the ipte instruction for this request has
3773 * already finished. We might race against a second unmapper that
3774 * wants to set the blocking bit. Lets just retry the request loop.
3775 */
David Hildenbrand8ad35752014-03-14 11:00:21 +01003776 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003777 int rc;
Martin Schwidefskyb2d73b22016-03-08 11:54:42 +01003778 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3779 kvm_s390_get_prefix(vcpu),
3780 PAGE_SIZE * 2, PROT_WRITE);
Julius Niedworokaca411a2016-08-03 16:39:55 +02003781 if (rc) {
3782 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003783 return rc;
Julius Niedworokaca411a2016-08-03 16:39:55 +02003784 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003785 goto retry;
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003786 }
David Hildenbrand8ad35752014-03-14 11:00:21 +01003787
David Hildenbrandd3d692c2014-07-29 08:53:36 +02003788 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3789 vcpu->arch.sie_block->ihcpu = 0xffff;
3790 goto retry;
3791 }
3792
David Hildenbrand8ad35752014-03-14 11:00:21 +01003793 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3794 if (!ibs_enabled(vcpu)) {
3795 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
David Hildenbrandef8f4f42018-01-23 18:05:29 +01003796 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003797 }
3798 goto retry;
3799 }
3800
3801 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3802 if (ibs_enabled(vcpu)) {
3803 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
David Hildenbrand9daecfc2018-01-23 18:05:30 +01003804 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
David Hildenbrand8ad35752014-03-14 11:00:21 +01003805 }
3806 goto retry;
3807 }
3808
David Hildenbrand6502a342016-06-21 14:19:51 +02003809 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3810 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3811 goto retry;
3812 }
3813
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003814 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3815 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003816 * Disable CMM virtualization; we will emulate the ESSA
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003817 * instruction manually, in order to provide additional
3818 * functionalities needed for live migration.
3819 */
3820 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3821 goto retry;
3822 }
3823
3824 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3825 /*
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003826 * Re-enable CMM virtualization if CMMA is available and
3827 * CMM has been used.
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003828 */
3829 if ((vcpu->kvm->arch.use_cmma) &&
Janosch Frankc9f0a2b2018-02-16 12:16:14 +01003830 (vcpu->kvm->mm->context.uses_cmm))
Claudio Imbrenda190df4a2016-08-04 17:54:42 +02003831 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3832 goto retry;
3833 }
3834
David Hildenbrand0759d062014-05-13 16:54:32 +02003835 /* nothing to do, just clear the request */
Radim Krčmář72875d82017-04-26 22:32:19 +02003836 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
David Hildenbrand3194cdb2018-09-25 19:16:17 -04003837 /* we left the vsie handler, nothing to do, just clear the request */
3838 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
David Hildenbrand0759d062014-05-13 16:54:32 +02003839
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003840 return 0;
3841}
3842
David Hildenbrand0e7def52018-02-07 12:46:43 +01003843void kvm_s390_set_tod_clock(struct kvm *kvm,
3844 const struct kvm_s390_vm_tod_clock *gtod)
Collin L. Walling8fa16962016-07-26 15:29:44 -04003845{
3846 struct kvm_vcpu *vcpu;
3847 struct kvm_s390_tod_clock_ext htod;
3848 int i;
3849
3850 mutex_lock(&kvm->lock);
3851 preempt_disable();
3852
3853 get_tod_clock_ext((char *)&htod);
3854
3855 kvm->arch.epoch = gtod->tod - htod.tod;
David Hildenbrand0e7def52018-02-07 12:46:43 +01003856 kvm->arch.epdx = 0;
3857 if (test_kvm_facility(kvm, 139)) {
3858 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3859 if (kvm->arch.epoch > gtod->tod)
3860 kvm->arch.epdx -= 1;
3861 }
Collin L. Walling8fa16962016-07-26 15:29:44 -04003862
3863 kvm_s390_vcpu_block_all(kvm);
3864 kvm_for_each_vcpu(i, vcpu, kvm) {
3865 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3866 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3867 }
3868
3869 kvm_s390_vcpu_unblock_all(kvm);
3870 preempt_enable();
3871 mutex_unlock(&kvm->lock);
3872}
3873
Thomas Huthfa576c52014-05-06 17:20:16 +02003874/**
3875 * kvm_arch_fault_in_page - fault-in guest page if necessary
3876 * @vcpu: The corresponding virtual cpu
3877 * @gpa: Guest physical address
3878 * @writable: Whether the page should be writable or not
3879 *
3880 * Make sure that a guest page has been faulted-in on the host.
3881 *
3882 * Return: Zero on success, negative error code otherwise.
3883 */
3884long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003885{
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02003886 return gmap_fault(vcpu->arch.gmap, gpa,
3887 writable ? FAULT_FLAG_WRITE : 0);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02003888}
3889
Dominik Dingel3c038e62013-10-07 17:11:48 +02003890static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3891 unsigned long token)
3892{
3893 struct kvm_s390_interrupt inti;
Jens Freimann383d0b02014-07-29 15:11:49 +02003894 struct kvm_s390_irq irq;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003895
3896 if (start_token) {
Jens Freimann383d0b02014-07-29 15:11:49 +02003897 irq.u.ext.ext_params2 = token;
3898 irq.type = KVM_S390_INT_PFAULT_INIT;
3899 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
Dominik Dingel3c038e62013-10-07 17:11:48 +02003900 } else {
3901 inti.type = KVM_S390_INT_PFAULT_DONE;
Jens Freimann383d0b02014-07-29 15:11:49 +02003902 inti.parm64 = token;
Dominik Dingel3c038e62013-10-07 17:11:48 +02003903 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3904 }
3905}
3906
3907void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3908 struct kvm_async_pf *work)
3909{
3910 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3911 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3912}
3913
3914void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3915 struct kvm_async_pf *work)
3916{
3917 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3918 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3919}
3920
3921void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3922 struct kvm_async_pf *work)
3923{
3924 /* s390 will always inject the page directly */
3925}
3926
3927bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3928{
3929 /*
3930 * s390 will always inject the page directly,
3931 * but we still want check_async_completion to cleanup
3932 */
3933 return true;
3934}
3935
3936static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3937{
3938 hva_t hva;
3939 struct kvm_arch_async_pf arch;
3940 int rc;
3941
3942 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3943 return 0;
3944 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3945 vcpu->arch.pfault_compare)
3946 return 0;
3947 if (psw_extint_disabled(vcpu))
3948 return 0;
David Hildenbrand9a022062014-08-05 17:40:47 +02003949 if (kvm_s390_vcpu_has_irq(vcpu, 0))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003950 return 0;
David Hildenbrandb9224cd2018-04-30 17:55:24 +02003951 if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003952 return 0;
3953 if (!vcpu->arch.gmap->pfault_enabled)
3954 return 0;
3955
Heiko Carstens81480cc2014-01-01 16:36:07 +01003956 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3957 hva += current->thread.gmap_addr & ~PAGE_MASK;
3958 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
Dominik Dingel3c038e62013-10-07 17:11:48 +02003959 return 0;
3960
3961 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3962 return rc;
3963}
3964
Thomas Huth3fb4c402013-09-12 10:33:43 +02003965static int vcpu_pre_run(struct kvm_vcpu *vcpu)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003966{
Thomas Huth3fb4c402013-09-12 10:33:43 +02003967 int rc, cpuflags;
Carsten Ottee168bf82012-01-04 10:25:22 +01003968
Dominik Dingel3c038e62013-10-07 17:11:48 +02003969 /*
3970 * On s390 notifications for arriving pages will be delivered directly
3971 * to the guest but the house keeping for completed pfaults is
3972 * handled outside the worker.
3973 */
3974 kvm_check_async_pf_completion(vcpu);
3975
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01003976 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3977 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
Heiko Carstensb0c632d2008-03-25 18:47:20 +01003978
3979 if (need_resched())
3980 schedule();
3981
Martin Schwidefskyd3a73ac2014-04-15 12:55:07 +02003982 if (test_cpu_flag(CIF_MCCK_PENDING))
Christian Borntraeger71cde582008-05-21 13:37:34 +02003983 s390_handle_mcck();
3984
Jens Freimann79395032014-04-17 10:10:30 +02003985 if (!kvm_is_ucontrol(vcpu->kvm)) {
3986 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3987 if (rc)
3988 return rc;
3989 }
Carsten Otte0ff31862008-05-21 13:37:37 +02003990
Christian Borntraeger2c70fe42013-05-17 14:41:36 +02003991 rc = kvm_s390_handle_requests(vcpu);
3992 if (rc)
3993 return rc;
3994
David Hildenbrand27291e22014-01-23 12:26:52 +01003995 if (guestdbg_enabled(vcpu)) {
3996 kvm_s390_backup_guest_per_regs(vcpu);
3997 kvm_s390_patch_guest_per_regs(vcpu);
3998 }
3999
Michael Mueller9f30f622019-01-31 09:52:44 +01004000 clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
4001
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004002 vcpu->arch.sie_block->icptcode = 0;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004003 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
4004 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
4005 trace_kvm_s390_sie_enter(vcpu, cpuflags);
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02004006
Thomas Huth3fb4c402013-09-12 10:33:43 +02004007 return 0;
4008}
4009
Thomas Huth492d8642015-02-10 16:11:01 +01004010static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
4011{
David Hildenbrand56317922016-01-12 17:37:58 +01004012 struct kvm_s390_pgm_info pgm_info = {
4013 .code = PGM_ADDRESSING,
4014 };
4015 u8 opcode, ilen;
Thomas Huth492d8642015-02-10 16:11:01 +01004016 int rc;
4017
4018 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
4019 trace_kvm_s390_sie_fault(vcpu);
4020
4021 /*
4022 * We want to inject an addressing exception, which is defined as a
4023 * suppressing or terminating exception. However, since we came here
4024 * by a DAT access exception, the PSW still points to the faulting
4025 * instruction since DAT exceptions are nullifying. So we've got
4026 * to look up the current opcode to get the length of the instruction
4027 * to be able to forward the PSW.
4028 */
David Hildenbrand3fa8cad72016-05-24 12:00:49 +02004029 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
David Hildenbrand56317922016-01-12 17:37:58 +01004030 ilen = insn_length(opcode);
David Hildenbrand9b0d7212016-01-12 17:40:54 +01004031 if (rc < 0) {
4032 return rc;
4033 } else if (rc) {
4034 /* Instruction-Fetching Exceptions - we can't detect the ilen.
4035 * Forward by arbitrary ilc, injection will take care of
4036 * nullification if necessary.
4037 */
4038 pgm_info = vcpu->arch.pgm;
4039 ilen = 4;
4040 }
David Hildenbrand56317922016-01-12 17:37:58 +01004041 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
4042 kvm_s390_forward_psw(vcpu, ilen);
4043 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
Thomas Huth492d8642015-02-10 16:11:01 +01004044}
4045
Thomas Huth3fb4c402013-09-12 10:33:43 +02004046static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
4047{
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02004048 struct mcck_volatile_info *mcck_info;
4049 struct sie_page *sie_page;
4050
Dominik Dingel2b29a9f2013-07-26 15:04:00 +02004051 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
4052 vcpu->arch.sie_block->icptcode);
4053 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
4054
David Hildenbrand27291e22014-01-23 12:26:52 +01004055 if (guestdbg_enabled(vcpu))
4056 kvm_s390_restore_guest_per_regs(vcpu);
4057
Christian Borntraeger7ec7c8c2015-12-02 14:27:03 +01004058 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
4059 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004060
QingFeng Hao4d62fcc2017-06-07 12:03:05 +02004061 if (exit_reason == -EINTR) {
4062 VCPU_EVENT(vcpu, 3, "%s", "machine check");
4063 sie_page = container_of(vcpu->arch.sie_block,
4064 struct sie_page, sie_block);
4065 mcck_info = &sie_page->mcck_info;
4066 kvm_s390_reinject_machine_check(vcpu, mcck_info);
4067 return 0;
4068 }
4069
David Hildenbrand71f116b2015-10-19 16:24:28 +02004070 if (vcpu->arch.sie_block->icptcode > 0) {
4071 int rc = kvm_handle_sie_intercept(vcpu);
4072
4073 if (rc != -EOPNOTSUPP)
4074 return rc;
4075 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
4076 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
4077 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
4078 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
4079 return -EREMOTE;
4080 } else if (exit_reason != -EFAULT) {
4081 vcpu->stat.exit_null++;
4082 return 0;
Thomas Huth210b16072013-09-19 16:26:18 +02004083 } else if (kvm_is_ucontrol(vcpu->kvm)) {
4084 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
4085 vcpu->run->s390_ucontrol.trans_exc_code =
4086 current->thread.gmap_addr;
4087 vcpu->run->s390_ucontrol.pgm_code = 0x10;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004088 return -EREMOTE;
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004089 } else if (current->thread.gmap_pfault) {
Dominik Dingel3c038e62013-10-07 17:11:48 +02004090 trace_kvm_s390_major_guest_pfault(vcpu);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004091 current->thread.gmap_pfault = 0;
David Hildenbrand71f116b2015-10-19 16:24:28 +02004092 if (kvm_arch_setup_async_pf(vcpu))
4093 return 0;
4094 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
Dominik Dingel24eb3a82013-06-17 16:25:18 +02004095 }
David Hildenbrand71f116b2015-10-19 16:24:28 +02004096 return vcpu_post_run_fault_in_sie(vcpu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004097}
4098
Janosch Frank3adae0b2019-12-13 08:26:06 -05004099#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
Thomas Huth3fb4c402013-09-12 10:33:43 +02004100static int __vcpu_run(struct kvm_vcpu *vcpu)
4101{
4102 int rc, exit_reason;
Janosch Frankc8aac232019-05-08 15:52:00 +02004103 struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004104
Thomas Huth800c1062013-09-12 10:33:45 +02004105 /*
4106 * We try to hold kvm->srcu during most of vcpu_run (except when run-
4107 * ning the guest), so that memslots (and other stuff) are protected
4108 */
4109 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4110
Thomas Hutha76ccff2013-09-12 10:33:44 +02004111 do {
4112 rc = vcpu_pre_run(vcpu);
4113 if (rc)
4114 break;
Thomas Huth3fb4c402013-09-12 10:33:43 +02004115
Thomas Huth800c1062013-09-12 10:33:45 +02004116 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Thomas Hutha76ccff2013-09-12 10:33:44 +02004117 /*
4118 * As PF_VCPU will be used in fault handler, between
4119 * guest_enter and guest_exit should be no uaccess.
4120 */
Christian Borntraeger0097d122015-04-30 13:43:30 +02004121 local_irq_disable();
Paolo Bonzini6edaa532016-06-15 15:18:26 +02004122 guest_enter_irqoff();
David Hildenbranddb0758b2016-02-15 09:42:25 +01004123 __disable_cpu_timer_accounting(vcpu);
Christian Borntraeger0097d122015-04-30 13:43:30 +02004124 local_irq_enable();
Janosch Frankc8aac232019-05-08 15:52:00 +02004125 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4126 memcpy(sie_page->pv_grregs,
4127 vcpu->run->s.regs.gprs,
4128 sizeof(sie_page->pv_grregs));
4129 }
Thomas Hutha76ccff2013-09-12 10:33:44 +02004130 exit_reason = sie64a(vcpu->arch.sie_block,
4131 vcpu->run->s.regs.gprs);
Janosch Frankc8aac232019-05-08 15:52:00 +02004132 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4133 memcpy(vcpu->run->s.regs.gprs,
4134 sie_page->pv_grregs,
4135 sizeof(sie_page->pv_grregs));
Janosch Frank3adae0b2019-12-13 08:26:06 -05004136 /*
4137 * We're not allowed to inject interrupts on intercepts
4138 * that leave the guest state in an "in-between" state
4139 * where the next SIE entry will do a continuation.
4140 * Fence interrupts in our "internal" PSW.
4141 */
4142 if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
4143 vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
4144 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4145 }
Janosch Frankc8aac232019-05-08 15:52:00 +02004146 }
Christian Borntraeger0097d122015-04-30 13:43:30 +02004147 local_irq_disable();
David Hildenbranddb0758b2016-02-15 09:42:25 +01004148 __enable_cpu_timer_accounting(vcpu);
Paolo Bonzini6edaa532016-06-15 15:18:26 +02004149 guest_exit_irqoff();
Christian Borntraeger0097d122015-04-30 13:43:30 +02004150 local_irq_enable();
Thomas Huth800c1062013-09-12 10:33:45 +02004151 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004152
Thomas Hutha76ccff2013-09-12 10:33:44 +02004153 rc = vcpu_post_run(vcpu, exit_reason);
David Hildenbrand27291e22014-01-23 12:26:52 +01004154 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
Thomas Huth3fb4c402013-09-12 10:33:43 +02004155
Thomas Huth800c1062013-09-12 10:33:45 +02004156 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
Carsten Ottee168bf82012-01-04 10:25:22 +01004157 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004158}
4159
Janosch Frank811ea792019-06-14 13:11:21 +02004160static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
David Hildenbrandb028ee32014-07-17 10:47:43 +02004161{
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004162 struct runtime_instr_cb *riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004163 struct gs_cb *gscb;
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004164
4165 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004166 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
David Hildenbrandb028ee32014-07-17 10:47:43 +02004167 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
4168 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
David Hildenbrandb028ee32014-07-17 10:47:43 +02004169 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
David Hildenbrandb028ee32014-07-17 10:47:43 +02004170 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
4171 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
4172 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
4173 }
4174 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
4175 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
4176 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
4177 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
David Hildenbrand9fbd8082014-10-09 15:01:38 +02004178 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
4179 kvm_clear_async_pf_completion_queue(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004180 }
Fan Zhang80cd8762016-08-15 04:53:22 +02004181 /*
4182 * If userspace sets the riccb (e.g. after migration) to a valid state,
4183 * we should enable RI here instead of doing the lazy enablement.
4184 */
4185 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004186 test_kvm_facility(vcpu->kvm, 64) &&
Alice Frosibb59c2d2017-09-14 12:35:45 +02004187 riccb->v &&
David Hildenbrand0c9d8682017-03-13 11:48:28 +01004188 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
Christian Borntraeger4d5f2c02017-02-09 17:15:41 +01004189 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
David Hildenbrand0c9d8682017-03-13 11:48:28 +01004190 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
Fan Zhang80cd8762016-08-15 04:53:22 +02004191 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004192 /*
4193 * If userspace sets the gscb (e.g. after migration) to non-zero,
4194 * we should enable GS here instead of doing the lazy enablement.
4195 */
4196 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
4197 test_kvm_facility(vcpu->kvm, 133) &&
4198 gscb->gssm &&
4199 !vcpu->arch.gs_enabled) {
4200 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
4201 vcpu->arch.sie_block->ecb |= ECB_GS;
4202 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
4203 vcpu->arch.gs_enabled = 1;
Fan Zhang80cd8762016-08-15 04:53:22 +02004204 }
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01004205 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
4206 test_kvm_facility(vcpu->kvm, 82)) {
4207 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
4208 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
4209 }
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004210 if (MACHINE_HAS_GS) {
4211 preempt_disable();
4212 __ctl_set_bit(2, 4);
4213 if (current->thread.gs_cb) {
4214 vcpu->arch.host_gscb = current->thread.gs_cb;
4215 save_gs_cb(vcpu->arch.host_gscb);
4216 }
4217 if (vcpu->arch.gs_enabled) {
4218 current->thread.gs_cb = (struct gs_cb *)
4219 &vcpu->run->s.regs.gscb;
4220 restore_gs_cb(current->thread.gs_cb);
4221 }
4222 preempt_enable();
4223 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004224 /* SIE will load etoken directly from SDNX and therefore kvm_run */
Janosch Frank811ea792019-06-14 13:11:21 +02004225}
4226
4227static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4228{
4229 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
4230 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
4231 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
4232 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
4233 /* some control register changes require a tlb flush */
4234 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4235 }
4236 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4237 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
4238 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
4239 }
4240 save_access_regs(vcpu->arch.host_acrs);
4241 restore_access_regs(vcpu->run->s.regs.acrs);
4242 /* save host (userspace) fprs/vrs */
4243 save_fpu_regs();
4244 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
4245 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
4246 if (MACHINE_HAS_VX)
4247 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
4248 else
4249 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
4250 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
4251 if (test_fp_ctl(current->thread.fpu.fpc))
4252 /* User space provided an invalid FPC, let's clear it */
4253 current->thread.fpu.fpc = 0;
4254
4255 /* Sync fmt2 only data */
4256 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
4257 sync_regs_fmt2(vcpu, kvm_run);
4258 } else {
4259 /*
4260 * In several places we have to modify our internal view to
4261 * not do things that are disallowed by the ultravisor. For
4262 * example we must not inject interrupts after specific exits
4263 * (e.g. 112 prefix page not secure). We do this by turning
4264 * off the machine check, external and I/O interrupt bits
4265 * of our PSW copy. To avoid getting validity intercepts, we
4266 * do only accept the condition code from userspace.
4267 */
4268 vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
4269 vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
4270 PSW_MASK_CC;
4271 }
Fan Zhang80cd8762016-08-15 04:53:22 +02004272
David Hildenbrandb028ee32014-07-17 10:47:43 +02004273 kvm_run->kvm_dirty_regs = 0;
4274}
4275
Janosch Frank811ea792019-06-14 13:11:21 +02004276static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
David Hildenbrandb028ee32014-07-17 10:47:43 +02004277{
David Hildenbrandb028ee32014-07-17 10:47:43 +02004278 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
4279 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
4280 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
Christian Borntraeger35b3fde2018-01-17 14:44:34 +01004281 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
Fan Zhang4e0b1ab2016-11-29 07:17:55 +01004282 if (MACHINE_HAS_GS) {
4283 __ctl_set_bit(2, 4);
4284 if (vcpu->arch.gs_enabled)
4285 save_gs_cb(current->thread.gs_cb);
4286 preempt_disable();
4287 current->thread.gs_cb = vcpu->arch.host_gscb;
4288 restore_gs_cb(vcpu->arch.host_gscb);
4289 preempt_enable();
4290 if (!vcpu->arch.host_gscb)
4291 __ctl_clear_bit(2, 4);
4292 vcpu->arch.host_gscb = NULL;
4293 }
Christian Borntraegera3da7b42018-03-08 16:08:49 +00004294 /* SIE will save etoken directly into SDNX and therefore kvm_run */
David Hildenbrandb028ee32014-07-17 10:47:43 +02004295}
4296
Janosch Frank811ea792019-06-14 13:11:21 +02004297static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4298{
4299 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
4300 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
4301 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
4302 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4303 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
4304 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
4305 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
4306 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
4307 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
4308 save_access_regs(vcpu->run->s.regs.acrs);
4309 restore_access_regs(vcpu->arch.host_acrs);
4310 /* Save guest register state */
4311 save_fpu_regs();
4312 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
4313 /* Restore will be done lazily at return */
4314 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
4315 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
4316 if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
4317 store_regs_fmt2(vcpu, kvm_run);
4318}
4319
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004320int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4321{
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004322 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004323
Paolo Bonzini460df4c2017-02-08 11:50:15 +01004324 if (kvm_run->immediate_exit)
4325 return -EINTR;
4326
Thomas Huth200824f2019-09-04 10:51:59 +02004327 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
4328 kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
4329 return -EINVAL;
4330
Christoffer Dallaccb7572017-12-04 21:35:25 +01004331 vcpu_load(vcpu);
4332
David Hildenbrand27291e22014-01-23 12:26:52 +01004333 if (guestdbg_exit_pending(vcpu)) {
4334 kvm_s390_prepare_debug_exit(vcpu);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004335 rc = 0;
4336 goto out;
David Hildenbrand27291e22014-01-23 12:26:52 +01004337 }
4338
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004339 kvm_sigset_activate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004340
Janosch Frankfe28c7862019-05-15 13:24:30 +02004341 /*
4342 * no need to check the return value of vcpu_start as it can only have
4343 * an error for protvirt, but protvirt means user cpu state
4344 */
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004345 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
4346 kvm_s390_vcpu_start(vcpu);
4347 } else if (is_vcpu_stopped(vcpu)) {
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004348 pr_err_ratelimited("can't run stopped vcpu %d\n",
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004349 vcpu->vcpu_id);
Christoffer Dallaccb7572017-12-04 21:35:25 +01004350 rc = -EINVAL;
4351 goto out;
David Hildenbrand6352e4d2014-04-10 17:35:00 +02004352 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004353
David Hildenbrandb028ee32014-07-17 10:47:43 +02004354 sync_regs(vcpu, kvm_run);
David Hildenbranddb0758b2016-02-15 09:42:25 +01004355 enable_cpu_timer_accounting(vcpu);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004356
Heiko Carstensdab4079d2009-06-12 10:26:32 +02004357 might_fault();
Thomas Hutha76ccff2013-09-12 10:33:44 +02004358 rc = __vcpu_run(vcpu);
Christian Ehrhardt9ace9032009-05-20 15:34:55 +02004359
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004360 if (signal_pending(current) && !rc) {
4361 kvm_run->exit_reason = KVM_EXIT_INTR;
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004362 rc = -EINTR;
Christian Ehrhardtb1d16c42009-05-20 15:34:56 +02004363 }
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004364
David Hildenbrand27291e22014-01-23 12:26:52 +01004365 if (guestdbg_exit_pending(vcpu) && !rc) {
4366 kvm_s390_prepare_debug_exit(vcpu);
4367 rc = 0;
4368 }
4369
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004370 if (rc == -EREMOTE) {
David Hildenbrand71f116b2015-10-19 16:24:28 +02004371 /* userspace support is needed, kvm_run has been prepared */
Christian Borntraeger8f2abe62008-03-25 18:47:23 +01004372 rc = 0;
4373 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004374
David Hildenbranddb0758b2016-02-15 09:42:25 +01004375 disable_cpu_timer_accounting(vcpu);
David Hildenbrandb028ee32014-07-17 10:47:43 +02004376 store_regs(vcpu, kvm_run);
Carsten Otted7b0b5e2009-11-19 14:21:16 +01004377
Jan H. Schönherr20b70352017-11-24 22:39:01 +01004378 kvm_sigset_deactivate(vcpu);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004379
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004380 vcpu->stat.exit_userspace++;
Christoffer Dallaccb7572017-12-04 21:35:25 +01004381out:
4382 vcpu_put(vcpu);
Heiko Carstens7e8e6ab2008-04-04 15:12:35 +02004383 return rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004384}
4385
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004386/*
4387 * store status at address
4388 * we use have two special cases:
4389 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
4390 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
4391 */
Heiko Carstensd0bce602014-01-01 16:45:58 +01004392int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004393{
Carsten Otte092670c2011-07-24 10:48:22 +02004394 unsigned char archmode = 1;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004395 freg_t fprs[NUM_FPRS];
Michael Muellerfda902c2014-05-13 16:58:30 +02004396 unsigned int px;
David Hildenbrand4287f242016-02-15 09:40:12 +01004397 u64 clkcomp, cputm;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004398 int rc;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004399
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004400 px = kvm_s390_get_prefix(vcpu);
Heiko Carstensd0bce602014-01-01 16:45:58 +01004401 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
4402 if (write_guest_abs(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004403 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004404 gpa = 0;
Heiko Carstensd0bce602014-01-01 16:45:58 +01004405 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
4406 if (write_guest_real(vcpu, 163, &archmode, 1))
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004407 return -EFAULT;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004408 gpa = px;
4409 } else
4410 gpa -= __LC_FPREGS_SAVE_AREA;
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004411
4412 /* manually convert vector registers if necessary */
4413 if (MACHINE_HAS_VX) {
David Hildenbrand9522b372016-03-08 12:24:30 +01004414 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004415 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
4416 fprs, 128);
4417 } else {
4418 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
David Hildenbrand6fd8e672016-01-18 14:46:34 +01004419 vcpu->run->s.regs.fprs, 128);
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004420 }
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004421 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004422 vcpu->run->s.regs.gprs, 128);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004423 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004424 &vcpu->arch.sie_block->gpsw, 16);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004425 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
Michael Muellerfda902c2014-05-13 16:58:30 +02004426 &px, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004427 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004428 &vcpu->run->s.regs.fpc, 4);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004429 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004430 &vcpu->arch.sie_block->todpr, 4);
David Hildenbrand4287f242016-02-15 09:40:12 +01004431 cputm = kvm_s390_get_cpu_timer(vcpu);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004432 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
David Hildenbrand4287f242016-02-15 09:40:12 +01004433 &cputm, 8);
Thomas Huth178bd782013-11-13 20:28:18 +01004434 clkcomp = vcpu->arch.sie_block->ckc >> 8;
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004435 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004436 &clkcomp, 8);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004437 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004438 &vcpu->run->s.regs.acrs, 64);
Martin Schwidefskyd9a3a092015-10-23 09:02:32 +02004439 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
Heiko Carstensd0bce602014-01-01 16:45:58 +01004440 &vcpu->arch.sie_block->gcr, 128);
4441 return rc ? -EFAULT : 0;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004442}
4443
Thomas Huthe8798922013-11-06 15:46:33 +01004444int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
4445{
4446 /*
4447 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
Christian Borntraeger31d8b8d2016-11-10 14:22:02 +01004448 * switch in the run ioctl. Let's update our copies before we save
Thomas Huthe8798922013-11-06 15:46:33 +01004449 * it into the save area
4450 */
Hendrik Bruecknerd0164ee2015-06-29 16:43:06 +02004451 save_fpu_regs();
David Hildenbrand9abc2a02016-01-14 22:12:47 +01004452 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
Thomas Huthe8798922013-11-06 15:46:33 +01004453 save_access_regs(vcpu->run->s.regs.acrs);
4454
4455 return kvm_s390_store_status_unloaded(vcpu, addr);
4456}
4457
David Hildenbrand8ad35752014-03-14 11:00:21 +01004458static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4459{
4460 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004461 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004462}
4463
4464static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
4465{
4466 unsigned int i;
4467 struct kvm_vcpu *vcpu;
4468
4469 kvm_for_each_vcpu(i, vcpu, kvm) {
4470 __disable_ibs_on_vcpu(vcpu);
4471 }
4472}
4473
4474static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
4475{
David Hildenbrand09a400e2016-04-04 15:57:08 +02004476 if (!sclp.has_ibs)
4477 return;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004478 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
Christian Borntraeger8e236542015-04-09 13:49:04 +02004479 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004480}
4481
Janosch Frankfe28c7862019-05-15 13:24:30 +02004482int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004483{
Janosch Frankfe28c7862019-05-15 13:24:30 +02004484 int i, online_vcpus, r = 0, started_vcpus = 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004485
4486 if (!is_vcpu_stopped(vcpu))
Janosch Frankfe28c7862019-05-15 13:24:30 +02004487 return 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004488
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004489 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004490 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004491 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004492 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4493
Janosch Frankfe28c7862019-05-15 13:24:30 +02004494 /* Let's tell the UV that we want to change into the operating state */
4495 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4496 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR);
4497 if (r) {
4498 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4499 return r;
4500 }
4501 }
4502
David Hildenbrand8ad35752014-03-14 11:00:21 +01004503 for (i = 0; i < online_vcpus; i++) {
4504 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
4505 started_vcpus++;
4506 }
4507
4508 if (started_vcpus == 0) {
4509 /* we're the only active VCPU -> speed it up */
4510 __enable_ibs_on_vcpu(vcpu);
4511 } else if (started_vcpus == 1) {
4512 /*
4513 * As we are starting a second VCPU, we have to disable
4514 * the IBS facility on all VCPUs to remove potentially
4515 * oustanding ENABLE requests.
4516 */
4517 __disable_ibs_on_all_vcpus(vcpu->kvm);
4518 }
4519
David Hildenbrand9daecfc2018-01-23 18:05:30 +01004520 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004521 /*
Christian Borntraeger72f21822020-01-30 11:18:28 -05004522 * The real PSW might have changed due to a RESTART interpreted by the
4523 * ultravisor. We block all interrupts and let the next sie exit
4524 * refresh our view.
4525 */
4526 if (kvm_s390_pv_cpu_is_protected(vcpu))
4527 vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
4528 /*
David Hildenbrand8ad35752014-03-14 11:00:21 +01004529 * Another VCPU might have used IBS while we were offline.
4530 * Let's play safe and flush the VCPU at startup.
4531 */
David Hildenbrandd3d692c2014-07-29 08:53:36 +02004532 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004533 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
Janosch Frankfe28c7862019-05-15 13:24:30 +02004534 return 0;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004535}
4536
Janosch Frankfe28c7862019-05-15 13:24:30 +02004537int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004538{
Janosch Frankfe28c7862019-05-15 13:24:30 +02004539 int i, online_vcpus, r = 0, started_vcpus = 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004540 struct kvm_vcpu *started_vcpu = NULL;
4541
4542 if (is_vcpu_stopped(vcpu))
Janosch Frankfe28c7862019-05-15 13:24:30 +02004543 return 0;
David Hildenbrand8ad35752014-03-14 11:00:21 +01004544
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004545 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004546 /* Only one cpu at a time may enter/leave the STOPPED state. */
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004547 spin_lock(&vcpu->kvm->arch.start_stop_lock);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004548 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
4549
Janosch Frankfe28c7862019-05-15 13:24:30 +02004550 /* Let's tell the UV that we want to change into the stopped state */
4551 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4552 r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP);
4553 if (r) {
4554 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
4555 return r;
4556 }
4557 }
4558
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004559 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
David Hildenbrand6cddd432014-10-15 16:48:53 +02004560 kvm_s390_clear_stop_irq(vcpu);
David Hildenbrand32f5ff632014-04-14 12:40:03 +02004561
David Hildenbrandef8f4f42018-01-23 18:05:29 +01004562 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
David Hildenbrand8ad35752014-03-14 11:00:21 +01004563 __disable_ibs_on_vcpu(vcpu);
4564
4565 for (i = 0; i < online_vcpus; i++) {
4566 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
4567 started_vcpus++;
4568 started_vcpu = vcpu->kvm->vcpus[i];
4569 }
4570 }
4571
4572 if (started_vcpus == 1) {
4573 /*
4574 * As we only have one VCPU left, we want to enable the
4575 * IBS facility for that VCPU to speed it up.
4576 */
4577 __enable_ibs_on_vcpu(started_vcpu);
4578 }
4579
David Hildenbrand433b9ee2014-05-06 16:11:14 +02004580 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
Janosch Frankfe28c7862019-05-15 13:24:30 +02004581 return 0;
David Hildenbrand6852d7b2014-03-14 10:59:29 +01004582}
4583
Cornelia Huckd6712df2012-12-20 15:32:11 +01004584static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4585 struct kvm_enable_cap *cap)
4586{
4587 int r;
4588
4589 if (cap->flags)
4590 return -EINVAL;
4591
4592 switch (cap->cap) {
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004593 case KVM_CAP_S390_CSS_SUPPORT:
4594 if (!vcpu->kvm->arch.css_support) {
4595 vcpu->kvm->arch.css_support = 1;
Christian Borntraegerc92ea7b2015-07-22 15:52:10 +02004596 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
Cornelia Huckfa6b7fe2012-12-20 15:32:12 +01004597 trace_kvm_s390_enable_css(vcpu->kvm);
4598 }
4599 r = 0;
4600 break;
Cornelia Huckd6712df2012-12-20 15:32:11 +01004601 default:
4602 r = -EINVAL;
4603 break;
4604 }
4605 return r;
4606}
4607
Janosch Frank19e12272019-04-02 09:21:06 +02004608static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
4609 struct kvm_s390_mem_op *mop)
4610{
4611 void __user *uaddr = (void __user *)mop->buf;
4612 int r = 0;
4613
4614 if (mop->flags || !mop->size)
4615 return -EINVAL;
4616 if (mop->size + mop->sida_offset < mop->size)
4617 return -EINVAL;
4618 if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
4619 return -E2BIG;
4620
4621 switch (mop->op) {
4622 case KVM_S390_MEMOP_SIDA_READ:
4623 if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) +
4624 mop->sida_offset), mop->size))
4625 r = -EFAULT;
4626
4627 break;
4628 case KVM_S390_MEMOP_SIDA_WRITE:
4629 if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) +
4630 mop->sida_offset), uaddr, mop->size))
4631 r = -EFAULT;
4632 break;
4633 }
4634 return r;
4635}
Thomas Huth41408c282015-02-06 15:01:21 +01004636static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
4637 struct kvm_s390_mem_op *mop)
4638{
4639 void __user *uaddr = (void __user *)mop->buf;
4640 void *tmpbuf = NULL;
Janosch Frank19e12272019-04-02 09:21:06 +02004641 int r = 0;
Thomas Huth41408c282015-02-06 15:01:21 +01004642 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
4643 | KVM_S390_MEMOP_F_CHECK_ONLY;
4644
Thomas Hutha13b03b2019-08-29 14:25:17 +02004645 if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
Thomas Huth41408c282015-02-06 15:01:21 +01004646 return -EINVAL;
4647
4648 if (mop->size > MEM_OP_MAX_SIZE)
4649 return -E2BIG;
4650
Janosch Frank19e12272019-04-02 09:21:06 +02004651 if (kvm_s390_pv_cpu_is_protected(vcpu))
4652 return -EINVAL;
4653
Thomas Huth41408c282015-02-06 15:01:21 +01004654 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
4655 tmpbuf = vmalloc(mop->size);
4656 if (!tmpbuf)
4657 return -ENOMEM;
4658 }
4659
Thomas Huth41408c282015-02-06 15:01:21 +01004660 switch (mop->op) {
4661 case KVM_S390_MEMOP_LOGICAL_READ:
4662 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004663 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4664 mop->size, GACC_FETCH);
Thomas Huth41408c282015-02-06 15:01:21 +01004665 break;
4666 }
4667 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4668 if (r == 0) {
4669 if (copy_to_user(uaddr, tmpbuf, mop->size))
4670 r = -EFAULT;
4671 }
4672 break;
4673 case KVM_S390_MEMOP_LOGICAL_WRITE:
4674 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
David Hildenbrand92c96322015-11-16 15:42:11 +01004675 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4676 mop->size, GACC_STORE);
Thomas Huth41408c282015-02-06 15:01:21 +01004677 break;
4678 }
4679 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
4680 r = -EFAULT;
4681 break;
4682 }
4683 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4684 break;
Thomas Huth41408c282015-02-06 15:01:21 +01004685 }
4686
Thomas Huth41408c282015-02-06 15:01:21 +01004687 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
4688 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
4689
4690 vfree(tmpbuf);
4691 return r;
4692}
4693
Janosch Frank19e12272019-04-02 09:21:06 +02004694static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu,
4695 struct kvm_s390_mem_op *mop)
4696{
4697 int r, srcu_idx;
4698
4699 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4700
4701 switch (mop->op) {
4702 case KVM_S390_MEMOP_LOGICAL_READ:
4703 case KVM_S390_MEMOP_LOGICAL_WRITE:
4704 r = kvm_s390_guest_mem_op(vcpu, mop);
4705 break;
4706 case KVM_S390_MEMOP_SIDA_READ:
4707 case KVM_S390_MEMOP_SIDA_WRITE:
4708 /* we are locked against sida going away by the vcpu->mutex */
4709 r = kvm_s390_guest_sida_op(vcpu, mop);
4710 break;
4711 default:
4712 r = -EINVAL;
4713 }
4714
4715 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
4716 return r;
4717}
4718
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004719long kvm_arch_vcpu_async_ioctl(struct file *filp,
4720 unsigned int ioctl, unsigned long arg)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004721{
4722 struct kvm_vcpu *vcpu = filp->private_data;
4723 void __user *argp = (void __user *)arg;
4724
Avi Kivity93736622010-05-13 12:35:17 +03004725 switch (ioctl) {
Jens Freimann47b43c52014-11-11 20:57:06 +01004726 case KVM_S390_IRQ: {
4727 struct kvm_s390_irq s390irq;
4728
Jens Freimann47b43c52014-11-11 20:57:06 +01004729 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004730 return -EFAULT;
4731 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Jens Freimann47b43c52014-11-11 20:57:06 +01004732 }
Avi Kivity93736622010-05-13 12:35:17 +03004733 case KVM_S390_INTERRUPT: {
Carsten Otteba5c1e92008-03-25 18:47:26 +01004734 struct kvm_s390_interrupt s390int;
Thomas Huth53936b52019-09-12 13:54:38 +02004735 struct kvm_s390_irq s390irq = {};
Carsten Otteba5c1e92008-03-25 18:47:26 +01004736
4737 if (copy_from_user(&s390int, argp, sizeof(s390int)))
Christoffer Dall9b0624712017-12-04 21:35:36 +01004738 return -EFAULT;
Jens Freimann383d0b02014-07-29 15:11:49 +02004739 if (s390int_to_s390irq(&s390int, &s390irq))
4740 return -EINVAL;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004741 return kvm_s390_inject_vcpu(vcpu, &s390irq);
Carsten Otteba5c1e92008-03-25 18:47:26 +01004742 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004743 }
Paolo Bonzini5cb09442017-12-12 17:41:34 +01004744 return -ENOIOCTLCMD;
4745}
4746
4747long kvm_arch_vcpu_ioctl(struct file *filp,
4748 unsigned int ioctl, unsigned long arg)
4749{
4750 struct kvm_vcpu *vcpu = filp->private_data;
4751 void __user *argp = (void __user *)arg;
4752 int idx;
4753 long r;
Janosch Frank8a8378f2020-01-09 04:37:50 -05004754 u16 rc, rrc;
Christoffer Dall9b0624712017-12-04 21:35:36 +01004755
4756 vcpu_load(vcpu);
4757
4758 switch (ioctl) {
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004759 case KVM_S390_STORE_STATUS:
Thomas Huth800c1062013-09-12 10:33:45 +02004760 idx = srcu_read_lock(&vcpu->kvm->srcu);
Christian Borntraeger55680892020-01-31 05:02:00 -05004761 r = kvm_s390_store_status_unloaded(vcpu, arg);
Thomas Huth800c1062013-09-12 10:33:45 +02004762 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004763 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004764 case KVM_S390_SET_INITIAL_PSW: {
4765 psw_t psw;
4766
Avi Kivitybc923cc2010-05-13 12:21:46 +03004767 r = -EFAULT;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004768 if (copy_from_user(&psw, argp, sizeof(psw)))
Avi Kivitybc923cc2010-05-13 12:21:46 +03004769 break;
4770 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
4771 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004772 }
Janosch Frank7de3f142020-01-31 05:02:02 -05004773 case KVM_S390_CLEAR_RESET:
4774 r = 0;
4775 kvm_arch_vcpu_ioctl_clear_reset(vcpu);
Janosch Frank8a8378f2020-01-09 04:37:50 -05004776 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4777 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
4778 UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc);
4779 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x",
4780 rc, rrc);
4781 }
Janosch Frank7de3f142020-01-31 05:02:02 -05004782 break;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004783 case KVM_S390_INITIAL_RESET:
Janosch Frank7de3f142020-01-31 05:02:02 -05004784 r = 0;
4785 kvm_arch_vcpu_ioctl_initial_reset(vcpu);
Janosch Frank8a8378f2020-01-09 04:37:50 -05004786 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4787 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
4788 UVC_CMD_CPU_RESET_INITIAL,
4789 &rc, &rrc);
4790 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x",
4791 rc, rrc);
4792 }
Janosch Frank7de3f142020-01-31 05:02:02 -05004793 break;
4794 case KVM_S390_NORMAL_RESET:
4795 r = 0;
4796 kvm_arch_vcpu_ioctl_normal_reset(vcpu);
Janosch Frank8a8378f2020-01-09 04:37:50 -05004797 if (kvm_s390_pv_cpu_is_protected(vcpu)) {
4798 r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu),
4799 UVC_CMD_CPU_RESET, &rc, &rrc);
4800 VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x",
4801 rc, rrc);
4802 }
Avi Kivitybc923cc2010-05-13 12:21:46 +03004803 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02004804 case KVM_SET_ONE_REG:
4805 case KVM_GET_ONE_REG: {
4806 struct kvm_one_reg reg;
Janosch Frank68cf7b12019-06-14 13:11:21 +02004807 r = -EINVAL;
4808 if (kvm_s390_pv_cpu_is_protected(vcpu))
4809 break;
Carsten Otte14eebd92012-05-15 14:15:26 +02004810 r = -EFAULT;
4811 if (copy_from_user(&reg, argp, sizeof(reg)))
4812 break;
4813 if (ioctl == KVM_SET_ONE_REG)
4814 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
4815 else
4816 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
4817 break;
4818 }
Carsten Otte27e03932012-01-04 10:25:21 +01004819#ifdef CONFIG_KVM_S390_UCONTROL
4820 case KVM_S390_UCAS_MAP: {
4821 struct kvm_s390_ucas_mapping ucasmap;
4822
4823 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4824 r = -EFAULT;
4825 break;
4826 }
4827
4828 if (!kvm_is_ucontrol(vcpu->kvm)) {
4829 r = -EINVAL;
4830 break;
4831 }
4832
4833 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
4834 ucasmap.vcpu_addr, ucasmap.length);
4835 break;
4836 }
4837 case KVM_S390_UCAS_UNMAP: {
4838 struct kvm_s390_ucas_mapping ucasmap;
4839
4840 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4841 r = -EFAULT;
4842 break;
4843 }
4844
4845 if (!kvm_is_ucontrol(vcpu->kvm)) {
4846 r = -EINVAL;
4847 break;
4848 }
4849
4850 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
4851 ucasmap.length);
4852 break;
4853 }
4854#endif
Carsten Otteccc79102012-01-04 10:25:26 +01004855 case KVM_S390_VCPU_FAULT: {
Martin Schwidefsky527e30b2014-04-30 16:04:25 +02004856 r = gmap_fault(vcpu->arch.gmap, arg, 0);
Carsten Otteccc79102012-01-04 10:25:26 +01004857 break;
4858 }
Cornelia Huckd6712df2012-12-20 15:32:11 +01004859 case KVM_ENABLE_CAP:
4860 {
4861 struct kvm_enable_cap cap;
4862 r = -EFAULT;
4863 if (copy_from_user(&cap, argp, sizeof(cap)))
4864 break;
4865 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
4866 break;
4867 }
Thomas Huth41408c282015-02-06 15:01:21 +01004868 case KVM_S390_MEM_OP: {
4869 struct kvm_s390_mem_op mem_op;
4870
4871 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
Janosch Frank19e12272019-04-02 09:21:06 +02004872 r = kvm_s390_guest_memsida_op(vcpu, &mem_op);
Thomas Huth41408c282015-02-06 15:01:21 +01004873 else
4874 r = -EFAULT;
4875 break;
4876 }
Jens Freimann816c7662014-11-24 17:13:46 +01004877 case KVM_S390_SET_IRQ_STATE: {
4878 struct kvm_s390_irq_state irq_state;
4879
4880 r = -EFAULT;
4881 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4882 break;
4883 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
4884 irq_state.len == 0 ||
4885 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
4886 r = -EINVAL;
4887 break;
4888 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004889 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004890 r = kvm_s390_set_irq_state(vcpu,
4891 (void __user *) irq_state.buf,
4892 irq_state.len);
4893 break;
4894 }
4895 case KVM_S390_GET_IRQ_STATE: {
4896 struct kvm_s390_irq_state irq_state;
4897
4898 r = -EFAULT;
4899 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4900 break;
4901 if (irq_state.len == 0) {
4902 r = -EINVAL;
4903 break;
4904 }
Christian Borntraegerbb64da92017-11-21 16:02:52 +01004905 /* do not use irq_state.flags, it will break old QEMUs */
Jens Freimann816c7662014-11-24 17:13:46 +01004906 r = kvm_s390_get_irq_state(vcpu,
4907 (__u8 __user *) irq_state.buf,
4908 irq_state.len);
4909 break;
4910 }
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004911 default:
Carsten Otte3e6afcf2012-01-04 10:25:30 +01004912 r = -ENOTTY;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004913 }
Christoffer Dall9b0624712017-12-04 21:35:36 +01004914
4915 vcpu_put(vcpu);
Avi Kivitybc923cc2010-05-13 12:21:46 +03004916 return r;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004917}
4918
Souptick Joarder1499fa82018-04-19 00:49:58 +05304919vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
Carsten Otte5b1c1492012-01-04 10:25:23 +01004920{
4921#ifdef CONFIG_KVM_S390_UCONTROL
4922 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
4923 && (kvm_is_ucontrol(vcpu->kvm))) {
4924 vmf->page = virt_to_page(vcpu->arch.sie_block);
4925 get_page(vmf->page);
4926 return 0;
4927 }
4928#endif
4929 return VM_FAULT_SIGBUS;
4930}
4931
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004932/* Section: memory related */
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004933int kvm_arch_prepare_memory_region(struct kvm *kvm,
4934 struct kvm_memory_slot *memslot,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004935 const struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa7b6195a2013-02-27 19:44:34 +09004936 enum kvm_mr_change change)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004937{
Nick Wangdd2887e2013-03-25 17:22:57 +01004938 /* A few sanity checks. We can have memory slots which have to be
4939 located/ended at a segment boundary (1MB). The memory in userland is
4940 ok to be fragmented into various different vmas. It is okay to mmap()
4941 and munmap() stuff in this slot after doing this call at any time */
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004942
Carsten Otte598841c2011-07-24 10:48:21 +02004943 if (mem->userspace_addr & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004944 return -EINVAL;
4945
Carsten Otte598841c2011-07-24 10:48:21 +02004946 if (mem->memory_size & 0xffffful)
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004947 return -EINVAL;
4948
Dominik Dingela3a92c32014-12-01 17:24:42 +01004949 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
4950 return -EINVAL;
4951
Janosch Frank29b40f12019-09-30 04:19:18 -04004952 /* When we are protected, we should not change the memory slots */
4953 if (kvm_s390_pv_get_handle(kvm))
4954 return -EINVAL;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004955 return 0;
4956}
4957
4958void kvm_arch_commit_memory_region(struct kvm *kvm,
Paolo Bonzini09170a42015-05-18 13:59:39 +02004959 const struct kvm_userspace_memory_region *mem,
Sean Christopherson9d4c1972020-02-18 13:07:24 -08004960 struct kvm_memory_slot *old,
Paolo Bonzinif36f3f22015-05-18 13:20:23 +02004961 const struct kvm_memory_slot *new,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09004962 enum kvm_mr_change change)
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004963{
Christian Borntraeger19ec1662019-05-24 16:06:23 +02004964 int rc = 0;
Marcelo Tosattif7784b82009-12-23 14:35:18 -02004965
Christian Borntraeger19ec1662019-05-24 16:06:23 +02004966 switch (change) {
4967 case KVM_MR_DELETE:
4968 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4969 old->npages * PAGE_SIZE);
4970 break;
4971 case KVM_MR_MOVE:
4972 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4973 old->npages * PAGE_SIZE);
4974 if (rc)
4975 break;
Joe Perches3b684a42020-03-10 21:51:32 -07004976 fallthrough;
Christian Borntraeger19ec1662019-05-24 16:06:23 +02004977 case KVM_MR_CREATE:
4978 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
4979 mem->guest_phys_addr, mem->memory_size);
4980 break;
4981 case KVM_MR_FLAGS_ONLY:
4982 break;
4983 default:
4984 WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
4985 }
Carsten Otte598841c2011-07-24 10:48:21 +02004986 if (rc)
David Hildenbrandea2cdd22015-05-20 13:24:02 +02004987 pr_warn("failed to commit memory region\n");
Carsten Otte598841c2011-07-24 10:48:21 +02004988 return;
Heiko Carstensb0c632d2008-03-25 18:47:20 +01004989}
4990
Alexander Yarygin60a37702016-04-01 15:38:57 +03004991static inline unsigned long nonhyp_mask(int i)
4992{
4993 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
4994
4995 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
4996}
4997
Christian Borntraeger3491caf2016-05-13 12:16:35 +02004998void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
4999{
5000 vcpu->valid_wakeup = false;
5001}
5002
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005003static int __init kvm_s390_init(void)
5004{
Alexander Yarygin60a37702016-04-01 15:38:57 +03005005 int i;
5006
David Hildenbrand07197fd2015-01-30 16:01:38 +01005007 if (!sclp.has_sief2) {
Michael Mueller8d43d572018-12-10 11:15:16 +01005008 pr_info("SIE is not available\n");
David Hildenbrand07197fd2015-01-30 16:01:38 +01005009 return -ENODEV;
5010 }
5011
Janosch Franka4499382018-07-13 11:28:31 +01005012 if (nested && hpage) {
Michael Mueller8d43d572018-12-10 11:15:16 +01005013 pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n");
Janosch Franka4499382018-07-13 11:28:31 +01005014 return -EINVAL;
5015 }
5016
Alexander Yarygin60a37702016-04-01 15:38:57 +03005017 for (i = 0; i < 16; i++)
Christian Borntraegerc3b9e3e2018-02-09 16:26:29 +00005018 kvm_s390_fac_base[i] |=
Alexander Yarygin60a37702016-04-01 15:38:57 +03005019 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
5020
Michael Mueller9d8d5782015-02-02 15:42:51 +01005021 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005022}
5023
5024static void __exit kvm_s390_exit(void)
5025{
Heiko Carstensb0c632d2008-03-25 18:47:20 +01005026 kvm_exit();
5027}
5028
5029module_init(kvm_s390_init);
5030module_exit(kvm_s390_exit);
Cornelia Huck566af942013-05-27 18:42:33 +02005031
5032/*
5033 * Enable autoloading of the kvm module.
5034 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
5035 * since x86 takes a different approach.
5036 */
5037#include <linux/miscdevice.h>
5038MODULE_ALIAS_MISCDEV(KVM_MINOR);
5039MODULE_ALIAS("devname:kvm");