/* SPDX-License-Identifier: GPL-2.0 */
#ifndef INCLUDE_XEN_OPS_H
#define INCLUDE_XEN_OPS_H

#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/efi.h>
#include <xen/features.h>
#include <asm/xen/interface.h>
#include <xen/interface/vcpu.h>

DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu);

DECLARE_PER_CPU(uint32_t, xen_vcpu_id);
static inline uint32_t xen_vcpu_nr(int cpu)
{
	return per_cpu(xen_vcpu_id, cpu);
}

#define XEN_VCPU_ID_INVALID U32_MAX
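
/*
 * Example (illustrative sketch only): translating a Linux CPU number to
 * its Xen vcpu id before issuing a per-vcpu hypercall. The surrounding
 * function is hypothetical; VCPUOP_get_runstate_info and struct
 * vcpu_runstate_info come from <xen/interface/vcpu.h>.
 *
 *	static int query_runstate(int cpu, struct vcpu_runstate_info *info)
 *	{
 *		uint32_t vcpu = xen_vcpu_nr(cpu);
 *
 *		if (vcpu == XEN_VCPU_ID_INVALID)
 *			return -ENOENT;
 *		return HYPERVISOR_vcpu_op(VCPUOP_get_runstate_info, vcpu, info);
 *	}
 */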

void xen_arch_pre_suspend(void);
void xen_arch_post_suspend(int suspend_cancelled);

void xen_timer_resume(void);
void xen_arch_resume(void);
void xen_arch_suspend(void);

void xen_reboot(int reason);

void xen_resume_notifier_register(struct notifier_block *nb);
void xen_resume_notifier_unregister(struct notifier_block *nb);
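
/*
 * Example (sketch; the callback and block names are hypothetical):
 * running code after the domain resumes from a Xen suspend.
 *
 *	static int my_resume_cb(struct notifier_block *nb,
 *				unsigned long action, void *data)
 *	{
 *		pr_info("resumed from Xen suspend\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_resume_nb = {
 *		.notifier_call = my_resume_cb,
 *	};
 *
 *	xen_resume_notifier_register(&my_resume_nb);
 */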

bool xen_vcpu_stolen(int vcpu);
void xen_setup_runstate_info(int cpu);
void xen_time_setup_guest(void);
void xen_manage_runstate_time(int action);
void xen_get_runstate_snapshot(struct vcpu_runstate_info *res);
u64 xen_steal_clock(int cpu);

int xen_setup_shutdown_event(void);

extern unsigned long *xen_contiguous_bitmap;

#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
#endif
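
/*
 * Example (sketch with hypothetical values): exchanging 2^order frames
 * starting at pstart for a machine-contiguous region reachable through
 * a 32-bit DMA mask, then releasing it again when done.
 *
 *	dma_addr_t dma_handle;
 *	int rc;
 *
 *	rc = xen_create_contiguous_region(pstart, order, 32, &dma_handle);
 *	if (rc)
 *		return rc;
 *	...
 *	xen_destroy_contiguous_region(pstart, order);
 */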

#if defined(CONFIG_XEN_PV)
int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
		  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
		  unsigned int domid, bool no_translate);
#else
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
				xen_pfn_t *pfn, int nr, int *err_ptr,
				pgprot_t prot, unsigned int domid,
				bool no_translate)
{
	BUG();
	return 0;
}
#endif

struct vm_area_struct;

#ifdef CONFIG_XEN_AUTO_XLATE
int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
			      unsigned long addr,
			      xen_pfn_t *gfn, int nr,
			      int *err_ptr, pgprot_t prot,
			      unsigned int domid,
			      struct page **pages);
int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
			      int nr, struct page **pages);
#else
/*
 * These two functions are called from arch/x86/xen/mmu.c and so stubs
 * are needed for a configuration not specifying CONFIG_XEN_AUTO_XLATE.
 */
static inline int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
					    unsigned long addr,
					    xen_pfn_t *gfn, int nr,
					    int *err_ptr, pgprot_t prot,
					    unsigned int domid,
					    struct page **pages)
{
	return -EOPNOTSUPP;
}

static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
					    int nr, struct page **pages)
{
	return -EOPNOTSUPP;
}
#endif

int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
			unsigned long len);

/*
 * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
 * @vma: VMA to map the pages into
 * @addr: Address at which to map the pages
 * @gfn: Array of GFNs to map
 * @nr: Number of entries in the GFN array
 * @err_ptr: Returns per-GFN error status.
 * @prot: page protection mask
 * @domid: Domain owning the pages
 * @pages: Array of pages if this domain has an auto-translated physmap
 *
 * @gfn and @err_ptr may point to the same buffer; the GFNs will be
 * overwritten by the error codes after they are mapped.
 *
 * Returns the number of successfully mapped frames or a negative error
 * code.
 */
static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
					     unsigned long addr,
					     xen_pfn_t *gfn, int nr,
					     int *err_ptr, pgprot_t prot,
					     unsigned int domid,
					     struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
						 prot, domid, pages);

	/*
	 * Passing a NULL err_ptr is a programmer error: without the
	 * per-frame error status it is very hard to work out later why
	 * the wrong memory was mapped in, so BUG_ON() it here.
	 */
	BUG_ON(err_ptr == NULL);
	return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
			     false);
}
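
/*
 * Example (illustrative sketch; all names are hypothetical): mapping a
 * batch of foreign GFNs from an mmap handler. @pages may be NULL for PV
 * guests; auto-translated guests must supply the backing pages.
 *
 *	xen_pfn_t gfns[8];	// GFNs owned by domain @domid
 *	int errs[8];
 *	int mapped;
 *
 *	mapped = xen_remap_domain_gfn_array(vma, vma->vm_start, gfns, 8,
 *					    errs, vma->vm_page_prot,
 *					    domid, NULL);
 *	if (mapped < 0)
 *		return mapped;		// nothing could be mapped
 *	// on partial success, errs[i] holds the per-frame error codes
 */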

/*
 * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn
 * @vma: VMA to map the pages into
 * @addr: Address at which to map the pages
 * @mfn: Array of MFNs to map
 * @nr: Number of entries in the MFN array
 * @err_ptr: Returns per-MFN error status.
 * @prot: page protection mask
 * @domid: Domain owning the pages
 *
 * @mfn and @err_ptr may point to the same buffer; the MFNs will be
 * overwritten by the error codes after they are mapped.
 *
 * Returns the number of successfully mapped frames or a negative error
 * code.
 */
static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
					     unsigned long addr, xen_pfn_t *mfn,
					     int nr, int *err_ptr,
					     pgprot_t prot, unsigned int domid)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
			     true);
}

/*
 * xen_remap_domain_gfn_range() - map a range of foreign frames
 * @vma: VMA to map the pages into
 * @addr: Address at which to map the pages
 * @gfn: First GFN to map.
 * @nr: Number of frames to map
 * @prot: page protection mask
 * @domid: Domain owning the pages
 * @pages: Array of pages if this domain has an auto-translated physmap
 *
 * Returns the number of successfully mapped frames or a negative error
 * code.
 */
static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
					     unsigned long addr,
					     xen_pfn_t gfn, int nr,
					     pgprot_t prot, unsigned int domid,
					     struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);
}

int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int numpgs, struct page **pages);
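
/*
 * Example (sketch; first_gfn, nr and domid are hypothetical): mapping a
 * contiguous range of foreign frames and tearing the mapping down again.
 *
 *	rc = xen_remap_domain_gfn_range(vma, vma->vm_start, first_gfn, nr,
 *					vma->vm_page_prot, domid, NULL);
 *	if (rc < 0)
 *		return rc;
 *	...
 *	xen_unmap_domain_gfn_range(vma, nr, NULL);
 */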

int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
				  unsigned long nr_grant_frames);

bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);

void xen_efi_runtime_setup(void);

#if defined(CONFIG_XEN_PV) && !defined(CONFIG_PREEMPTION)

DECLARE_PER_CPU(bool, xen_in_preemptible_hcall);

static inline void xen_preemptible_hcall_begin(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, true);
}

static inline void xen_preemptible_hcall_end(void)
{
	__this_cpu_write(xen_in_preemptible_hcall, false);
}

#else

static inline void xen_preemptible_hcall_begin(void) { }
static inline void xen_preemptible_hcall_end(void) { }

#endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
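
/*
 * Example (sketch of how a long-running hypercall may be bracketed;
 * privcmd_call() here stands in for the arch hypercall trampoline):
 * marking a hypercall as preemptible so an upcall arriving while it
 * runs can trigger a reschedule on non-preemptible PV kernels.
 *
 *	xen_preemptible_hcall_begin();
 *	ret = privcmd_call(hcall, a1, a2, a3, a4, a5);
 *	xen_preemptible_hcall_end();
 */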

#endif /* INCLUDE_XEN_OPS_H */