#define pr_fmt(fmt) "Hyper-V: " fmt

#include <linux/hyperv.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/fpu/api.h>
#include <asm/mshyperv.h>
#include <asm/msr.h>
#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/hyperv.h>

/* Each gva in gva_list encodes up to 4096 pages to flush */
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
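/* With 4K pages, each gva_list entry therefore covers up to 16 MB of VA space. */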

static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
				      const struct flush_tlb_info *info);

/*
 * Fills in gva_list starting from offset. Returns the number of items added.
 */
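/*
 * Worked example (illustrative; addresses are arbitrary, 4K pages assumed):
 * for start = 0x400000 and end = 0x402000 a single entry 0x400000 | 1 is
 * produced, i.e. the base page plus one additional page. Ranges larger than
 * HV_TLB_FLUSH_UNIT set all low 12 bits and spill into further entries.
 */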
static inline int fill_gva_list(u64 gva_list[], int offset,
				unsigned long start, unsigned long end)
{
	int gva_n = offset;
	unsigned long cur = start, diff;

	do {
		diff = end > cur ? end - cur : 0;

		gva_list[gva_n] = cur & PAGE_MASK;
		/*
		 * Lower 12 bits encode the number of additional
		 * pages to flush (in addition to the 'cur' page).
		 */
		if (diff >= HV_TLB_FLUSH_UNIT)
			gva_list[gva_n] |= ~PAGE_MASK;
		else if (diff)
			gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;

		cur += HV_TLB_FLUSH_UNIT;
		gva_n++;

	} while (cur < end);

	return gva_n - offset;
}

static void hyperv_flush_tlb_others(const struct cpumask *cpus,
				    const struct flush_tlb_info *info)
{
	int cpu, vcpu, gva_n, max_gvas;
	struct hv_tlb_flush **flush_pcpu;
	struct hv_tlb_flush *flush;
	u64 status = U64_MAX;
	unsigned long flags;

	trace_hyperv_mmu_flush_tlb_others(cpus, info);

	if (!hv_hypercall_pg)
		goto do_native;

	if (cpumask_empty(cpus))
		return;

	local_irq_save(flags);

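	/*
	 * hyperv_pcpu_input_arg points to a pre-allocated per-CPU hypercall
	 * input page; interrupts are disabled, so it cannot be reused
	 * underneath us.
	 */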
	flush_pcpu = (struct hv_tlb_flush **)
		     this_cpu_ptr(hyperv_pcpu_input_arg);

	flush = *flush_pcpu;

	if (unlikely(!flush)) {
		local_irq_restore(flags);
		goto do_native;
	}

	if (info->mm) {
		/*
		 * AddressSpace argument must match the CR3 with PCID bits
		 * stripped out.
		 */
		flush->address_space = virt_to_phys(info->mm->pgd);
		flush->address_space &= CR3_ADDR_MASK;
		flush->flags = 0;
	} else {
		flush->address_space = 0;
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
	}

	flush->processor_mask = 0;
	if (cpumask_equal(cpus, cpu_present_mask)) {
		flush->flags |= HV_FLUSH_ALL_PROCESSORS;
	} else {
		/*
		 * From the supplied CPU set we need to figure out if we can get
		 * away with the cheaper HVCALL_FLUSH_VIRTUAL_ADDRESS_{LIST,SPACE}
		 * hypercalls. This is possible when the highest VP number in
		 * the set is < 64. As VP numbers are usually in ascending order
		 * and match Linux CPU ids, check the VP number of the highest
		 * bit in the supplied set first so we can quickly find out
		 * whether the *_EX hypercalls are a must. We still check every
		 * VP number when walking the supplied CPU set to remain correct
		 * in all cases.
		 */
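		/*
		 * (The non-EX flush input carries a single 64-bit
		 * processor_mask, which is why VP numbers >= 64 force the
		 * sparse *_EX variants.)
		 */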
		if (hv_cpu_number_to_vp_number(cpumask_last(cpus)) >= 64)
			goto do_ex_hypercall;

		for_each_cpu(cpu, cpus) {
			vcpu = hv_cpu_number_to_vp_number(cpu);
			if (vcpu >= 64)
				goto do_ex_hypercall;

			__set_bit(vcpu, (unsigned long *)
				  &flush->processor_mask);
		}
	}

	/*
	 * We can flush at most max_gvas gva_list entries with one hypercall.
	 * Flush the whole address space if we were asked to do more.
	 */
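	/*
	 * For a rough sense of scale (assuming the three u64 header fields of
	 * struct hv_tlb_flush and 4K pages): (4096 - 24) / 8 = 509 entries.
	 */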
	max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);

	if (info->end == TLB_FLUSH_ALL) {
		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
		status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
					 flush, NULL);
	} else if (info->end &&
		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
		status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
					 flush, NULL);
	} else {
		gva_n = fill_gva_list(flush->gva_list, 0,
				      info->start, info->end);
		status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
					     gva_n, 0, flush, NULL);
	}
	goto check_status;

do_ex_hypercall:
	status = hyperv_flush_tlb_others_ex(cpus, info);

check_status:
	local_irq_restore(flags);

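	/*
	 * The low 16 bits of the return value carry the hypercall status
	 * code; zero means success.
	 */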
	if (!(status & HV_HYPERCALL_RESULT_MASK))
		return;
do_native:
	native_flush_tlb_others(cpus, info);
}

static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
				      const struct flush_tlb_info *info)
{
	int nr_bank = 0, max_gvas, gva_n;
	struct hv_tlb_flush_ex **flush_pcpu;
	struct hv_tlb_flush_ex *flush;
	u64 status;

	if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
		return U64_MAX;

	flush_pcpu = (struct hv_tlb_flush_ex **)
		     this_cpu_ptr(hyperv_pcpu_input_arg);

	flush = *flush_pcpu;

	if (info->mm) {
		/*
		 * AddressSpace argument must match the CR3 with PCID bits
		 * stripped out.
		 */
		flush->address_space = virt_to_phys(info->mm->pgd);
		flush->address_space &= CR3_ADDR_MASK;
		flush->flags = 0;
	} else {
		flush->address_space = 0;
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
	}

	flush->hv_vp_set.valid_bank_mask = 0;

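	/*
	 * Encode the CPU set as a sparse VP set: one 64-bit bank per group
	 * of 64 VPs. cpumask_to_vpset() returns the number of banks used,
	 * or a negative value if the set cannot be encoded.
	 */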
	flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
	if (nr_bank < 0)
		return U64_MAX;

	/*
	 * We can flush at most max_gvas gva_list entries with one hypercall.
	 * Flush the whole address space if we were asked to do more.
	 */
	max_gvas =
		(PAGE_SIZE - sizeof(*flush) - nr_bank *
		 sizeof(flush->hv_vp_set.bank_contents[0])) /
		sizeof(flush->gva_list[0]);

	if (info->end == TLB_FLUSH_ALL) {
		flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
		status = hv_do_rep_hypercall(
			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
			0, nr_bank, flush, NULL);
	} else if (info->end &&
		   ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) {
		status = hv_do_rep_hypercall(
			HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
			0, nr_bank, flush, NULL);
	} else {
		gva_n = fill_gva_list(flush->gva_list, nr_bank,
				      info->start, info->end);
		status = hv_do_rep_hypercall(
			HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
			gva_n, nr_bank, flush, NULL);
	}

	return status;
}

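/*
 * Called during early platform setup (in this kernel era, from
 * ms_hyperv_init_platform()) so that remote TLB flushes go through the
 * hypervisor whenever it recommends doing so.
 */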
void hyperv_setup_mmu_ops(void)
{
	if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
		return;

	pr_info("Using hypercall for remote TLB flush\n");
	pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
}