/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to
 * support software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_AGAIN,		/* something went wrong. go again */
	EMULATE_EXIT_USER,	/* emulation requires exit to user-space */
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
				   int rs, unsigned int bytes,
				   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
				struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
				struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
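/*
 * Example with hypothetical values: for a TCE table with a 4K page
 * shift, offset 0 and a window of 512 pages, kvmppc_ioba_validate(stt,
 * 0x1000, 2) checks that the two pages starting at bus offset 0x1000
 * fall inside the window and evaluates to H_SUCCESS; an ioba beyond
 * the window evaluates to H_PARAMETER instead.
 */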
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
			     unsigned long liobn, unsigned long ioba,
			     unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
			     unsigned long liobn, unsigned long ioba,
			     unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     const struct kvm_userspace_memory_region *mem,
				     enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				     const struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*enable_svm)(struct kvm *kvm);
	int (*svm_off)(struct kvm *kvm);
	int (*enable_dawr1)(struct kvm *kvm);
	bool (*hash_v3_possible)(void);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
			swab32(vcpu->arch.last_inst) :
			vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}
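
/*
 * Usage sketch (illustrative, assuming an emulation path where
 * EMULATE_AGAIN makes the caller retry):
 *
 *	u32 inst;
 *
 *	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
 *		return EMULATE_AGAIN;
 *	// decode 'inst', which is now in host byte order
 */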

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}
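
/*
 * Worked example: with msb = 59 and lsb = 63, the mask is
 * (1 << 5) - 1 = 0x1f and the shift is 63 - 63 = 0, so
 * kvmppc_get_field(inst, 59, 63) returns the five rightmost bits of
 * 'inst' (IBM bit numbering: bit 0 is the leftmost of the 64-bit value).
 */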

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
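
/*
 * Worked example: kvmppc_set_field(inst, 59, 63, 3) builds mask 0x1f
 * with shift 0, clears the five rightmost bits of 'inst' and sets them
 * to 3; every other bit is passed through unchanged.
 */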

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})


#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
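
/*
 * Usage sketch (hypothetical ONE_REG id): for an id whose size field
 * encodes 8 bytes, one_reg_size(id) is 8, so
 *
 *	union kvmppc_one_reg val = get_reg_val(id, some_u64_register);
 *
 * fills val.dval, and set_reg_val(id, val) extracts the same 64-bit
 * payload back out; a 4-byte id uses wval instead, and any other size
 * hits the BUG().
 */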

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

/*
 * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
 * a CPU thread that's running/napping inside of a guest is by default regarded
 * as a request to wake the CPU (if needed) and continue execution within the
 * guest, potentially to process new state like externally-generated
 * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
 *
 * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
 * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
 * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
 * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
 * the receiving side prior to processing the IPI work.
 *
 * NOTE:
 *
 * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
 * This is to guard against sequences such as the following:
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *      105: smp_muxed_ipi_set_message():
 *      105:   smp_mb()
 *             // STORE DEFERRED DUE TO RE-ORDERING
 *    --105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |  42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    |  42: // returns to executing guest
 *    |      // RE-ORDERED STORE COMPLETES
 *    ->105:   message[CALL_FUNCTION] = 1
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 *
 * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
 * to guard against sequences such as the following (as well as to create
 * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *             // STORE DEFERRED DUE TO RE-ORDERING
 *    -- 42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    | 105: smp_muxed_ipi_set_message():
 *    | 105:   smp_mb()
 *    | 105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |      // RE-ORDERED STORE COMPLETES
 *    -> 42:   kvmppc_clear_host_ipi(42)
 *       42: // returns to executing guest
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 */
static inline void kvmppc_set_host_ipi(int cpu)
{
	/*
	 * order stores of IPI messages vs. setting of host_ipi flag
	 *
	 * pairs with the barrier in kvmppc_clear_host_ipi()
	 */
	smp_mb();
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
	/*
	 * order clearing of host_ipi flag vs. processing of IPI messages
	 *
	 * pairs with the barrier in kvmppc_set_host_ipi()
	 */
	smp_mb();
}

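/*
 * Illustrative pairing (a sketch, not a real call site): a sender that
 * needs the target thread to exit to the host does
 *
 *	kvmppc_set_host_ipi(cpu);	// before triggering the IPI
 *	// ... raise the doorbell/IPI to 'cpu' ...
 *
 * and the receiving side's IPI handler does
 *
 *	kvmppc_clear_host_ipi(smp_processor_id());
 *	// ... then processes the queued IPI messages ...
 */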
static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
					struct kvm_nested_guest *nested);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below the first "xive" is the "eXternal Interrupt Virtualization Engine"
 * ie. P9 new interrupt controller, while the second "xive" is the legacy
 * "eXternal Interrupt Vector Entry" which is the configuration of an
 * interrupt on the "xics" interrupt controller on P8 and earlier. Those
 * two functions consume or produce a legacy "XIVE" state from the
 * new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
			  struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return -ENOENT; }

#endif /* CONFIG_KVM_XIVE */

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations we want to set up while running in real
 * mode in the guest operating on the xics.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_dcache_clean, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_dcache_clean, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

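/*
 * Expansion sketch: on non-BOOKE-HV configs, SHARED_SPRNG_WRAPPER(srr0,
 * 64, SPRN_GSRR0) expands to SHARED_WRAPPER(srr0, 64), i.e. a
 * kvmppc_get_srr0()/kvmppc_set_srr0() pair that byte-swaps through the
 * shared struct according to guest endianness; on CONFIG_KVM_BOOKE_HV it
 * instead reads and writes the guest SPR SPRN_GSRR0 via mfspr()/mtspr().
 */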
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
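
/*
 * Worked example: for an indexed-form load such as "lwzx rt, ra, rb",
 * the effective address is GPR[rb] plus GPR[ra] when ra != 0 (ra == 0
 * means a zero base, per the ISA). If the guest MSR indicates 32-bit
 * mode, the result is truncated to 32 bits before use.
 */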

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */