blob: fd81228e8037fe29fd27d805ed3fb7e617a45606 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
H. Peter Anvin1965aae2008-10-22 22:26:29 -07002#ifndef _ASM_X86_PARAVIRT_H
3#define _ASM_X86_PARAVIRT_H
Rusty Russelld3561b72006-12-07 02:14:07 +01004/* Various instructions on x86 need to be replaced for
5 * para-virtualization: those hooks are defined here. */
Jeremy Fitzhardingeb239fb22007-05-02 19:27:13 +02006
7#ifdef CONFIG_PARAVIRT
Jeremy Fitzhardinge54321d92009-02-11 10:20:05 -08008#include <asm/pgtable_types.h>
Glauber de Oliveira Costa658be9d2008-01-30 13:32:06 +01009#include <asm/asm.h>
Rusty Russelld3561b72006-12-07 02:14:07 +010010
Jeremy Fitzhardingeac5672f2009-04-14 14:29:44 -070011#include <asm/paravirt_types.h>
Jeremy Fitzhardingeecb93d12009-01-28 14:35:05 -080012
Rusty Russelld3561b72006-12-07 02:14:07 +010013#ifndef __ASSEMBLY__
Paul Gortmaker187f1882011-11-23 20:12:59 -050014#include <linux/bug.h>
Jeremy Fitzhardinge3dc494e2007-05-02 19:27:13 +020015#include <linux/types.h>
Jeremy Fitzhardinged4c10472007-05-02 19:27:15 +020016#include <linux/cpumask.h>
Josh Poimboeuf87b240c2016-01-21 16:49:13 -060017#include <asm/frame.h>
Jeremy Fitzhardinge1a45b7a2007-05-02 19:27:15 +020018
H. Peter Anvinfaca6222008-01-30 13:31:02 +010019static inline void load_sp0(struct tss_struct *tss,
Rusty Russelld3561b72006-12-07 02:14:07 +010020 struct thread_struct *thread)
21{
H. Peter Anvinfaca6222008-01-30 13:31:02 +010022 PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
Rusty Russelld3561b72006-12-07 02:14:07 +010023}
24
Rusty Russelld3561b72006-12-07 02:14:07 +010025/* The paravirtualized CPUID instruction. */
26static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
27 unsigned int *ecx, unsigned int *edx)
28{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -070029 PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
Rusty Russelld3561b72006-12-07 02:14:07 +010030}
31
32/*
33 * These special macros can be used to get or set a debugging register
34 */
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020035static inline unsigned long paravirt_get_debugreg(int reg)
36{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -070037 return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020038}
39#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
40static inline void set_debugreg(unsigned long val, int reg)
41{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -070042 PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020043}
Rusty Russelld3561b72006-12-07 02:14:07 +010044
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020045static inline unsigned long read_cr0(void)
46{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -070047 return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020048}
Rusty Russelld3561b72006-12-07 02:14:07 +010049
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020050static inline void write_cr0(unsigned long x)
51{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -070052 PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020053}
Rusty Russelld3561b72006-12-07 02:14:07 +010054
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020055static inline unsigned long read_cr2(void)
56{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -070057 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020058}
Rusty Russelld3561b72006-12-07 02:14:07 +010059
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020060static inline void write_cr2(unsigned long x)
61{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -070062 PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020063}
Rusty Russelld3561b72006-12-07 02:14:07 +010064
Andy Lutomirski6c690ee2017-06-12 10:26:14 -070065static inline unsigned long __read_cr3(void)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020066{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -070067 return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020068}
69
70static inline void write_cr3(unsigned long x)
71{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -070072 PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020073}
74
Andy Lutomirski1e02ce42014-10-24 15:58:08 -070075static inline void __write_cr4(unsigned long x)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020076{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -070077 PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +020078}
Jeremy Fitzhardinge3dc494e2007-05-02 19:27:13 +020079
#ifdef CONFIG_X86_64
/* CR8 (task-priority register) exists only on 64-bit. */
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif
Glauber de Oliveira Costa4c9890c2008-01-30 13:33:19 +010091
David Howellsdf9ee292010-10-07 14:08:55 +010092static inline void arch_safe_halt(void)
Rusty Russelld3561b72006-12-07 02:14:07 +010093{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -070094 PVOP_VCALL0(pv_irq_ops.safe_halt);
Rusty Russelld3561b72006-12-07 02:14:07 +010095}
96
97static inline void halt(void)
98{
Cliff Wickmanc8217b82010-12-13 10:51:57 -060099 PVOP_VCALL0(pv_irq_ops.halt);
Rusty Russelld3561b72006-12-07 02:14:07 +0100100}
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200101
102static inline void wbinvd(void)
103{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700104 PVOP_VCALL0(pv_cpu_ops.wbinvd);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200105}
Rusty Russelld3561b72006-12-07 02:14:07 +0100106
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700107#define get_kernel_rpl() (pv_info.kernel_rpl)
Rusty Russelld3561b72006-12-07 02:14:07 +0100108
Andy Lutomirskidd2f4a02016-04-02 07:01:38 -0700109static inline u64 paravirt_read_msr(unsigned msr)
110{
111 return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr);
112}
113
114static inline void paravirt_write_msr(unsigned msr,
115 unsigned low, unsigned high)
116{
Anton Vasilyeve8ad8bc2017-06-23 19:23:13 +0300117 PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
Andy Lutomirskidd2f4a02016-04-02 07:01:38 -0700118}
119
Andy Lutomirskic2ee03b2016-04-02 07:01:36 -0700120static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200121{
Andy Lutomirskic2ee03b2016-04-02 07:01:36 -0700122 return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200123}
Borislav Petkov132ec922009-08-31 09:50:09 +0200124
Andy Lutomirskic2ee03b2016-04-02 07:01:36 -0700125static inline int paravirt_write_msr_safe(unsigned msr,
126 unsigned low, unsigned high)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200127{
Andy Lutomirskic2ee03b2016-04-02 07:01:36 -0700128 return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200129}
130
Joe Perches49cd7402008-03-23 01:03:00 -0700131#define rdmsr(msr, val1, val2) \
132do { \
Andy Lutomirski4985ce12016-04-02 07:01:39 -0700133 u64 _l = paravirt_read_msr(msr); \
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200134 val1 = (u32)_l; \
135 val2 = _l >> 32; \
Joe Perches49cd7402008-03-23 01:03:00 -0700136} while (0)
Rusty Russelld3561b72006-12-07 02:14:07 +0100137
Joe Perches49cd7402008-03-23 01:03:00 -0700138#define wrmsr(msr, val1, val2) \
139do { \
Andy Lutomirski4985ce12016-04-02 07:01:39 -0700140 paravirt_write_msr(msr, val1, val2); \
Joe Perches49cd7402008-03-23 01:03:00 -0700141} while (0)
Rusty Russelld3561b72006-12-07 02:14:07 +0100142
Joe Perches49cd7402008-03-23 01:03:00 -0700143#define rdmsrl(msr, val) \
144do { \
Andy Lutomirski4985ce12016-04-02 07:01:39 -0700145 val = paravirt_read_msr(msr); \
Joe Perches49cd7402008-03-23 01:03:00 -0700146} while (0)
Rusty Russelld3561b72006-12-07 02:14:07 +0100147
Andy Lutomirski47edb652015-07-23 12:14:40 -0700148static inline void wrmsrl(unsigned msr, u64 val)
149{
150 wrmsr(msr, (u32)val, (u32)(val>>32));
151}
152
Andy Lutomirskic2ee03b2016-04-02 07:01:36 -0700153#define wrmsr_safe(msr, a, b) paravirt_write_msr_safe(msr, a, b)
Rusty Russelld3561b72006-12-07 02:14:07 +0100154
/* rdmsr with exception handling; evaluates to 0 on success, else error. */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}
Borislav Petkov177fed12009-08-31 09:50:10 +0200172
Jeremy Fitzhardinge688340e2007-07-17 18:37:04 -0700173static inline unsigned long long paravirt_sched_clock(void)
174{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700175 return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
Jeremy Fitzhardinge688340e2007-07-17 18:37:04 -0700176}
Zachary Amsden6cb9a832007-03-05 00:30:35 -0800177
Ingo Molnarc5905af2012-02-24 08:31:31 +0100178struct static_key;
179extern struct static_key paravirt_steal_enabled;
180extern struct static_key paravirt_steal_rq_enabled;
Glauber Costa3c404b52011-07-11 15:28:15 -0400181
182static inline u64 paravirt_steal_clock(int cpu)
183{
184 return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
185}
186
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200187static inline unsigned long long paravirt_read_pmc(int counter)
188{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700189 return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200190}
191
/* Read a performance counter, split into low/high halves. */
#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
200
Jeremy Fitzhardinge38ffbe62008-07-23 14:21:18 -0700201static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
202{
203 PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
204}
205
206static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
207{
208 PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
209}
210
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200211static inline void load_TR_desc(void)
212{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700213 PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200214}
Glauber de Oliveira Costa6b68f012008-01-30 13:31:12 +0100215static inline void load_gdt(const struct desc_ptr *dtr)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200216{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700217 PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200218}
Glauber de Oliveira Costa6b68f012008-01-30 13:31:12 +0100219static inline void load_idt(const struct desc_ptr *dtr)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200220{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700221 PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200222}
223static inline void set_ldt(const void *addr, unsigned entries)
224{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700225 PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200226}
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200227static inline unsigned long paravirt_store_tr(void)
228{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700229 return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200230}
231#define store_tr(tr) ((tr) = paravirt_store_tr())
232static inline void load_TLS(struct thread_struct *t, unsigned cpu)
233{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700234 PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200235}
Glauber de Oliveira Costa75b8bb32008-01-30 13:31:13 +0100236
Jeremy Fitzhardinge9f9d4892008-06-25 00:19:32 -0400237#ifdef CONFIG_X86_64
238static inline void load_gs_index(unsigned int gs)
239{
240 PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
241}
242#endif
243
Glauber de Oliveira Costa75b8bb32008-01-30 13:31:13 +0100244static inline void write_ldt_entry(struct desc_struct *dt, int entry,
245 const void *desc)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200246{
Glauber de Oliveira Costa75b8bb32008-01-30 13:31:13 +0100247 PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200248}
Glauber de Oliveira Costa014b15b2008-01-30 13:31:13 +0100249
250static inline void write_gdt_entry(struct desc_struct *dt, int entry,
251 void *desc, int type)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200252{
Glauber de Oliveira Costa014b15b2008-01-30 13:31:13 +0100253 PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200254}
Glauber de Oliveira Costa014b15b2008-01-30 13:31:13 +0100255
Glauber de Oliveira Costa8d947342008-01-30 13:31:12 +0100256static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200257{
Glauber de Oliveira Costa8d947342008-01-30 13:31:12 +0100258 PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200259}
260static inline void set_iopl_mask(unsigned mask)
261{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700262 PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200263}
Jeremy Fitzhardinge3dc494e2007-05-02 19:27:13 +0200264
Rusty Russelld3561b72006-12-07 02:14:07 +0100265/* The paravirtualized I/O functions */
Joe Perches49cd7402008-03-23 01:03:00 -0700266static inline void slow_down_io(void)
267{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700268 pv_cpu_ops.io_delay();
Rusty Russelld3561b72006-12-07 02:14:07 +0100269#ifdef REALLY_SLOW_IO
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700270 pv_cpu_ops.io_delay();
271 pv_cpu_ops.io_delay();
272 pv_cpu_ops.io_delay();
Rusty Russelld3561b72006-12-07 02:14:07 +0100273#endif
274}
275
Jeremy Fitzhardinged6dd61c2007-05-02 19:27:14 +0200276static inline void paravirt_activate_mm(struct mm_struct *prev,
277 struct mm_struct *next)
278{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700279 PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
Jeremy Fitzhardinged6dd61c2007-05-02 19:27:14 +0200280}
281
Dave Hansena1ea1c02014-11-18 10:23:49 -0800282static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
283 struct mm_struct *mm)
Jeremy Fitzhardinged6dd61c2007-05-02 19:27:14 +0200284{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700285 PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
Jeremy Fitzhardinged6dd61c2007-05-02 19:27:14 +0200286}
287
Dave Hansena1ea1c02014-11-18 10:23:49 -0800288static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
Jeremy Fitzhardinged6dd61c2007-05-02 19:27:14 +0200289{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700290 PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
Jeremy Fitzhardinged6dd61c2007-05-02 19:27:14 +0200291}
292
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200293static inline void __flush_tlb(void)
294{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700295 PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200296}
297static inline void __flush_tlb_global(void)
298{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700299 PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200300}
301static inline void __flush_tlb_single(unsigned long addr)
302{
Jeremy Fitzhardinge93b1eab2007-10-16 11:51:29 -0700303 PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200304}
Rusty Russellda181a82006-12-07 02:14:08 +0100305
Rusty Russell4595f962009-01-10 21:58:09 -0800306static inline void flush_tlb_others(const struct cpumask *cpumask,
Andy Lutomirskia2055ab2017-05-28 10:00:10 -0700307 const struct flush_tlb_info *info)
Jeremy Fitzhardinged4c10472007-05-02 19:27:15 +0200308{
Andy Lutomirskia2055ab2017-05-28 10:00:10 -0700309 PVOP_VCALL2(pv_mmu_ops.flush_tlb_others, cpumask, info);
Jeremy Fitzhardinged4c10472007-05-02 19:27:15 +0200310}
311
Jeremy Fitzhardingeeba00452008-06-25 00:19:12 -0400312static inline int paravirt_pgd_alloc(struct mm_struct *mm)
313{
314 return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
315}
316
317static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
318{
319 PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
320}
321
Eduardo Habkostf8639932008-07-30 18:32:27 -0300322static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200323{
Jeremy Fitzhardinge6944a9c2008-03-17 16:37:01 -0700324 PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200325}
Eduardo Habkostf8639932008-07-30 18:32:27 -0300326static inline void paravirt_release_pte(unsigned long pfn)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200327{
Jeremy Fitzhardinge6944a9c2008-03-17 16:37:01 -0700328 PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200329}
Zachary Amsdenc119ecc2007-02-13 13:26:21 +0100330
Eduardo Habkostf8639932008-07-30 18:32:27 -0300331static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200332{
Jeremy Fitzhardinge6944a9c2008-03-17 16:37:01 -0700333 PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200334}
335
Eduardo Habkostf8639932008-07-30 18:32:27 -0300336static inline void paravirt_release_pmd(unsigned long pfn)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200337{
Jeremy Fitzhardinge6944a9c2008-03-17 16:37:01 -0700338 PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200339}
340
Eduardo Habkostf8639932008-07-30 18:32:27 -0300341static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
Jeremy Fitzhardinge2761fa02008-03-17 16:37:02 -0700342{
343 PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
344}
Eduardo Habkostf8639932008-07-30 18:32:27 -0300345static inline void paravirt_release_pud(unsigned long pfn)
Jeremy Fitzhardinge2761fa02008-03-17 16:37:02 -0700346{
347 PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
348}
349
Kirill A. Shutemov335437f2017-03-30 11:07:28 +0300350static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
351{
352 PVOP_VCALL2(pv_mmu_ops.alloc_p4d, mm, pfn);
353}
354
355static inline void paravirt_release_p4d(unsigned long pfn)
356{
357 PVOP_VCALL1(pv_mmu_ops.release_p4d, pfn);
358}
359
Jeremy Fitzhardinge773221f2008-01-30 13:33:15 +0100360static inline pte_t __pte(pteval_t val)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200361{
Jeremy Fitzhardinge773221f2008-01-30 13:33:15 +0100362 pteval_t ret;
363
364 if (sizeof(pteval_t) > sizeof(long))
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800365 ret = PVOP_CALLEE2(pteval_t,
366 pv_mmu_ops.make_pte,
367 val, (u64)val >> 32);
Jeremy Fitzhardinge773221f2008-01-30 13:33:15 +0100368 else
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800369 ret = PVOP_CALLEE1(pteval_t,
370 pv_mmu_ops.make_pte,
371 val);
Jeremy Fitzhardinge773221f2008-01-30 13:33:15 +0100372
Jeremy Fitzhardingec8e53932008-01-30 13:32:57 +0100373 return (pte_t) { .pte = ret };
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200374}
375
Jeremy Fitzhardinge773221f2008-01-30 13:33:15 +0100376static inline pteval_t pte_val(pte_t pte)
377{
378 pteval_t ret;
379
380 if (sizeof(pteval_t) > sizeof(long))
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800381 ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
382 pte.pte, (u64)pte.pte >> 32);
Jeremy Fitzhardinge773221f2008-01-30 13:33:15 +0100383 else
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800384 ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
385 pte.pte);
Jeremy Fitzhardinge773221f2008-01-30 13:33:15 +0100386
387 return ret;
388}
389
Jeremy Fitzhardingeef385032008-01-30 13:33:15 +0100390static inline pgd_t __pgd(pgdval_t val)
391{
392 pgdval_t ret;
393
394 if (sizeof(pgdval_t) > sizeof(long))
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800395 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
396 val, (u64)val >> 32);
Jeremy Fitzhardingeef385032008-01-30 13:33:15 +0100397 else
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800398 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
399 val);
Jeremy Fitzhardingeef385032008-01-30 13:33:15 +0100400
401 return (pgd_t) { ret };
402}
403
404static inline pgdval_t pgd_val(pgd_t pgd)
405{
406 pgdval_t ret;
407
408 if (sizeof(pgdval_t) > sizeof(long))
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800409 ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
410 pgd.pgd, (u64)pgd.pgd >> 32);
Jeremy Fitzhardingeef385032008-01-30 13:33:15 +0100411 else
Jeremy Fitzhardingeda5de7c2009-01-28 14:35:07 -0800412 ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
413 pgd.pgd);
Jeremy Fitzhardingeef385032008-01-30 13:33:15 +0100414
415 return ret;
416}
417
Jeremy Fitzhardinge08b882c2008-06-16 04:30:01 -0700418#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
419static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
420 pte_t *ptep)
421{
422 pteval_t ret;
423
424 ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
425 mm, addr, ptep);
426
427 return (pte_t) { .pte = ret };
428}
429
430static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
431 pte_t *ptep, pte_t pte)
432{
433 if (sizeof(pteval_t) > sizeof(long))
434 /* 5 arg words */
435 pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
436 else
437 PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
438 mm, addr, ptep, pte.pte);
439}
440
Jeremy Fitzhardinge4eed80c2008-01-30 13:33:15 +0100441static inline void set_pte(pte_t *ptep, pte_t pte)
442{
443 if (sizeof(pteval_t) > sizeof(long))
444 PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
445 pte.pte, (u64)pte.pte >> 32);
446 else
447 PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
448 pte.pte);
449}
450
451static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
452 pte_t *ptep, pte_t pte)
453{
454 if (sizeof(pteval_t) > sizeof(long))
455 /* 5 arg words */
456 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
457 else
458 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
459}
460
Jeremy Fitzhardinge60b3f622008-01-30 13:33:15 +0100461static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
462{
463 pmdval_t val = native_pmd_val(pmd);
464
465 if (sizeof(pmdval_t) > sizeof(long))
466 PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
467 else
468 PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
469}
470
#if CONFIG_PGTABLE_LEVELS >= 3
/* pmd/pud/p4d/pgd helpers; each level only exists above a threshold. */
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	if (sizeof(p4dval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_p4d, p4dp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_p4d, p4dp,
			    val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_CALLEE1(p4dval_t, pv_mmu_ops.make_p4d, val);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_CALLEE1(p4dval_t, pv_mmu_ops.p4d_val, p4d.p4d);
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */
Glauber de Oliveira Costa1fe91512008-01-30 13:33:19 +0100593
Jeremy Fitzhardinge4eed80c2008-01-30 13:33:15 +0100594#ifdef CONFIG_X86_PAE
595/* Special-case pte-setting operations for PAE, which can't update a
596 64-bit pte atomically */
597static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
598{
599 PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
600 pte.pte, pte.pte >> 32);
601}
602
Jeremy Fitzhardinge4eed80c2008-01-30 13:33:15 +0100603static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
604 pte_t *ptep)
605{
606 PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
607}
Jeremy Fitzhardinge60b3f622008-01-30 13:33:15 +0100608
609static inline void pmd_clear(pmd_t *pmdp)
610{
611 PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
612}
Jeremy Fitzhardinge4eed80c2008-01-30 13:33:15 +0100613#else /* !CONFIG_X86_PAE */
614static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
615{
616 set_pte(ptep, pte);
617}
618
Jeremy Fitzhardinge4eed80c2008-01-30 13:33:15 +0100619static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
620 pte_t *ptep)
621{
622 set_pte_at(mm, addr, ptep, __pte(0));
623}
Jeremy Fitzhardinge60b3f622008-01-30 13:33:15 +0100624
625static inline void pmd_clear(pmd_t *pmdp)
626{
627 set_pmd(pmdp, __pmd(0));
628}
Jeremy Fitzhardinge4eed80c2008-01-30 13:33:15 +0100629#endif /* CONFIG_X86_PAE */
630
Jeremy Fitzhardinge7fd7d832009-02-17 23:24:03 -0800631#define __HAVE_ARCH_START_CONTEXT_SWITCH
Jeremy Fitzhardinge224101e2009-02-18 11:18:57 -0800632static inline void arch_start_context_switch(struct task_struct *prev)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200633{
Jeremy Fitzhardinge224101e2009-02-18 11:18:57 -0800634 PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200635}
636
Jeremy Fitzhardinge224101e2009-02-18 11:18:57 -0800637static inline void arch_end_context_switch(struct task_struct *next)
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200638{
Jeremy Fitzhardinge224101e2009-02-18 11:18:57 -0800639 PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200640}
641
Zachary Amsden9226d122007-02-13 13:26:21 +0100642#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200643static inline void arch_enter_lazy_mmu_mode(void)
644{
Jeremy Fitzhardinge8965c1c02007-10-16 11:51:29 -0700645 PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200646}
647
648static inline void arch_leave_lazy_mmu_mode(void)
649{
Jeremy Fitzhardinge8965c1c02007-10-16 11:51:29 -0700650 PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
Jeremy Fitzhardingef8822f42007-05-02 19:27:14 +0200651}
652
Boris Ostrovsky511ba862013-03-23 09:36:36 -0400653static inline void arch_flush_lazy_mmu_mode(void)
654{
655 PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
656}
Zachary Amsden9226d122007-02-13 13:26:21 +0100657
Jeremy Fitzhardingeaeaaa592008-06-17 11:42:01 -0700658static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
Masami Hiramatsu3b3809a2009-04-09 10:55:33 -0700659 phys_addr_t phys, pgprot_t flags)
Jeremy Fitzhardingeaeaaa592008-06-17 11:42:01 -0700660{
661 pv_mmu_ops.set_fixmap(idx, phys, flags);
662}
663
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

/* Paravirtualized queued-spinlock slow path and wait/kick primitives. */
static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							 u32 val)
{
	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
}

/* Block until *ptr != val (or a kick), hypervisor permitting. */
static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
}

/* Wake a vCPU blocked in pv_wait(). */
static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}

#endif /* SMP && PARAVIRT_SPINLOCKS */
Ingo Molnar4bb689e2008-07-09 14:33:33 +0200693
/*
 * Register save/restore fragments pasted into the inline-asm strings of
 * the PVOP_* call macros.  32-bit and 64-bit variants differ in which
 * registers are caller-save and how the flags argument is passed
 * (PV_FLAGS_ARG is the asm constraint for it: "0" reuses operand 0 on
 * 32-bit, "D" is %rdi on 64-bit).
 */
#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
/* NOTE(review): only %ecx is pushed here; %eax/%edx handling is
 * presumably covered elsewhere by the thunk convention — confirm. */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
/* Pops must mirror the pushes above in exact reverse order. */
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/* We save some registers, but all of them, that's too much. We clobber all
 * caller saved registers but the argument parameter */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif
734
/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
/* Build the thunk's symbol name as a string literal. */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
/*
 * Emit the thunk in .text: FRAME_BEGIN/END keep objtool-visible frame
 * state; the caller-save registers are pushed, the real function is
 * called, and the registers are restored before returning.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)					\
	((struct paravirt_callee_save) { func })
770
/*
 * Return the current (possibly virtualized) interrupt-flag state via
 * pv_irq_ops.save_fl.  notrace: presumably kept out of ftrace because
 * these are low-level IRQ primitives — confirm against tracing docs.
 */
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}
775
/* Restore interrupt-flag state @f previously returned by arch_local_save_flags(). */
static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}
780
/* Disable local interrupts via pv_irq_ops.irq_disable. */
static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}
785
/* Enable local interrupts via pv_irq_ops.irq_enable. */
static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}
790
Steven Rostedtb5908542010-11-10 22:29:49 -0500791static inline notrace unsigned long arch_local_irq_save(void)
Rusty Russell139ec7c2006-12-07 02:14:08 +0100792{
793 unsigned long f;
794
David Howellsdf9ee292010-10-07 14:08:55 +0100795 f = arch_local_save_flags();
796 arch_local_irq_disable();
Rusty Russell139ec7c2006-12-07 02:14:08 +0100797 return f;
798}
799
Jeremy Fitzhardinge74d4aff2008-07-07 12:07:50 -0700800
/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

/* Boot banner hook; the definition lives outside this header. */
extern void default_banner(void);
817
Rusty Russelld3561b72006-12-07 02:14:07 +0100818#else /* __ASSEMBLY__ */
819
/*
 * Emit a patchable paravirt site from assembly: the instructions in
 * @ops are bracketed by local labels 771/772, and a record (address,
 * patch type, site length, clobber mask) is appended to the
 * .parainstructions section so the patcher can find and rewrite it.
 * @word/@algn are .long/4 or .quad/8 depending on pointer size.
 */
#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;							\
	ops;						\
772:;							\
	.pushsection .parainstructions,"a";		\
	 .align algn;					\
	 word 771b;					\
	 .byte ptype;					\
	 .byte 772b-771b;				\
	 .short clobbers;				\
	.popsection
831
Glauber de Oliveira Costa658be9d2008-01-30 13:32:06 +0100832
/*
 * Conditionally save/restore a register: the push/pop is emitted only
 * when @reg's CLBR_* bit is *clear* in @set, i.e. registers listed in
 * the set are skipped.
 */
#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif
Jeremy Fitzhardinge9104a182009-01-28 14:35:04 -0800837
#ifdef CONFIG_X86_64

/*
 * 64-bit: conditionally save/restore the caller-save register set.
 * Pops mirror pushes in exact reverse order.
 */
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

/* Index into the patch-template table; /8 because entries are pointer-sized. */
#define PARA_PATCH(struct, off)	((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
/* RIP-relative indirect operand for call/jmp through an ops-table slot. */
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
/* 32-bit variants of the above. */
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)	((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif
880
/* Patchable interrupt return: tail-jump through pv_cpu_ops.iret. */
#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

/*
 * Patchable cli/sti replacements: save every register not declared
 * clobbered (plus the callee-save set), make the indirect call, then
 * restore.  @clobbers is the CLBR_* mask the call site can spare.
 */
#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
Rusty Russell139ec7c2006-12-07 02:14:08 +0100896
#ifdef CONFIG_X86_32
/*
 * Read CR0 through pv_cpu_ops.read_cr0; result lands in %eax per the
 * calling convention, with the other caller-save regs preserved here.
 */
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
	pop %edx; pop %ecx
#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 * Hence this variant emits the raw instruction at the patch site.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

/* Read CR2 (fault address) via pv_mmu_ops; result in %rax per convention. */
#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

/* Patchable return-to-userspace path via pv_cpu_ops.usergs_sysret64. */
#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
#endif	/* CONFIG_X86_32 */
Rusty Russell139ec7c2006-12-07 02:14:08 +0100932
Rusty Russelld3561b72006-12-07 02:14:07 +0100933#endif /* __ASSEMBLY__ */
Thomas Gleixner6f30c1a2009-08-20 13:19:57 +0200934#else /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#ifndef __ASSEMBLY__
/*
 * !CONFIG_PARAVIRT stubs: with paravirt disabled there is no hook to
 * run on mm duplication or teardown, so these are empty and compile
 * away entirely.
 */
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* __ASSEMBLY__ */
Thomas Gleixner6f30c1a2009-08-20 13:19:57 +0200946#endif /* !CONFIG_PARAVIRT */
H. Peter Anvin1965aae2008-10-22 22:26:29 -0700947#endif /* _ASM_X86_PARAVIRT_H */