/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
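
/*
 * Worked example of the pfn<->mfn conversion described above (frame
 * numbers are illustrative, not taken from a real domain): if the
 * domain's pfn 0x1000 is backed by machine frame 0x8a32, a pte
 * mapping that page carries frame 0x8a32, while pte_val() hands
 * frame 0x1000 back to the rest of the kernel after the mfn-to-pfn
 * translation.
 */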
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/bug.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/paravirt.h>
#include <asm/linkage.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/page.h>
#include <xen/interface/xen.h>

#include "multicalls.h"
#include "mmu.h"

#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
	{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
	{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;

static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
	__page_aligned_bss;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;
}

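/*
 * Lookup example: with 4K pages a p2m page holds
 * PAGE_SIZE/sizeof(unsigned long) entries (1024 on 32-bit, 512 on
 * 64-bit), so the mfn for pfn 0x12345 lives at
 * p2m_top[p2m_top_index(0x12345)][p2m_index(0x12345)].
 */
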
/* Build the parallel p2m_top_mfn structures */
void xen_setup_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
		virt_to_mfn(p2m_top_mfn_list);
	HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
}

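/*
 * p2m_top_mfn is the mfn-expressed mirror of p2m_top, and
 * p2m_top_mfn_list holds the mfns of the pages making up p2m_top_mfn
 * itself.  Publishing this tree through
 * shared_info->arch.pfn_to_mfn_frame_list_list lets the save/restore
 * tools, which cannot follow guest virtual addresses, walk the whole
 * p2m by mfn alone.
 */
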
/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);

static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
{
	unsigned long *p;
	unsigned i;

	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
	BUG_ON(p == NULL);

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
		free_page((unsigned long)p);
	else
		*mfnp = virt_to_mfn(p);
}

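/*
 * Two CPUs can race in alloc_p2m() to populate the same p2m_missing
 * slot; the cmpxchg above lets exactly one of them install its page,
 * and the loser simply frees its freshly allocated page again.
 */
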
void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return;
	}

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		/* no need to allocate a page to store an invalid entry */
		if (mfn == INVALID_P2M_ENTRY)
			return;
		alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);
	unsigned offset = address & ~PAGE_MASK;

	BUG_ON(pte == NULL);

	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static void extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL)
		mcs.mc->args[1]++;
	else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

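/*
 * extend_mmu_update() either grows the mmu_update argument array of
 * the pending multicall (just bumping its count) or queues a fresh
 * MULTI_mmu_update entry, so consecutive pte/pmd/pud updates made
 * under PARAVIRT_LAZY_MMU collapse into a single hypercall.
 */
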
void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	/* updates to init_mm may be done without lock */
	if (mm == &init_mm)
		preempt_disable();

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
			goto out;
	}
	xen_set_pte(ptep, pteval);

out:
	if (mm == &init_mm)
		preempt_enable();
}

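/*
 * xen_set_pte_at() thus has three paths: a batched
 * MULTI_update_va_mapping when we are in lazy MMU mode, an immediate
 * update_va_mapping hypercall when the address belongs to the current
 * (or init) mm, and the generic xen_set_pte() fallback otherwise.
 */
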
pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & ~PTE_MASK;
		val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

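/*
 * Only present entries carry a frame number; non-present entries
 * (e.g. swap entries) are left untouched by the converters above,
 * and the flag bits outside PTE_MASK are preserved in both
 * directions.
 */
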
pteval_t xen_pte_val(pte_t pte)
{
	return pte_mfn_to_pfn(pte.pte);
}

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}

pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);
	return native_make_pte(pte);
}

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

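/*
 * For PAE the two pte words are stored high word first: when filling
 * a previously cleared (non-present) pte, the entry stays invisible
 * until the low word, which holds _PAGE_PRESENT, is written.
 * xen_pte_clear() below reverses the order, and xen_set_pte_atomic()
 * uses set_64bit() when a live entry must be replaced in one shot.
 */
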
#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}

void xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	/* If page is not pinned, we can just update the entry
	   directly */
	if (!page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pgd_hyper(ptr, val);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be TASK_SIZE, but at boot we need to pin up to FIXADDR_TOP.
 * But the important bit is that we don't pin beyond there, because
 * then we start getting into Xen's ptes.
 */
static int pgd_walk(pgd_t *pgd_base, int (*func)(struct page *, enum pt_level),
		    unsigned long limit)
{
	pgd_t *pgd = pgd_base;
	int flush = 0;
	unsigned long addr = 0;
	unsigned long pgd_next;

	BUG_ON(limit > FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (; addr != FIXADDR_TOP; pgd++, addr = pgd_next) {
		pud_t *pud;
		unsigned long pud_limit, pud_next;

		pgd_next = pud_limit = pgd_addr_end(addr, FIXADDR_TOP);

		if (!pgd_val(*pgd))
			continue;

		pud = pud_offset(pgd, 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(virt_to_page(pud), PT_PUD);

		for (; addr != pud_limit; pud++, addr = pud_next) {
			pmd_t *pmd;
			unsigned long pmd_limit;

			pud_next = pud_addr_end(addr, pud_limit);

			if (pud_next < limit)
				pmd_limit = pud_next;
			else
				pmd_limit = limit;

			if (pud_none(*pud))
				continue;

			pmd = pmd_offset(pud, 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(virt_to_page(pmd), PT_PMD);

			for (; addr != pmd_limit; pmd++) {
				addr += (PAGE_SIZE * PTRS_PER_PTE);
				if ((pmd_limit-1) < (addr-1)) {
					addr = pmd_limit;
					break;
				}

				if (pmd_none(*pmd))
					continue;

				flush |= (*func)(pmd_page(*pmd), PT_PTE);
			}
		}
	}

	flush |= (*func)(virt_to_page(pgd_base), PT_PGD);

	return flush;
}

static spinlock_t *lock_pte(struct page *page)
{
	spinlock_t *ptl = NULL;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	ptl = __pte_lockptr(page);
	spin_lock(ptl);
#endif

	return ptl;
}

static void do_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

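/*
 * xen_do_pin() only queues the mmuext op (e.g. MMUEXT_PIN_L1_TABLE or
 * MMUEXT_UNPIN_TABLE) in the current multicall batch; nothing reaches
 * Xen until the caller's xen_mc_issue().
 */
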
static int pin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		ptl = NULL;
		if (level == PT_PTE)
			ptl = lock_pte(page);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (level == PT_PTE)
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

		if (ptl) {
			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
void xen_pgd_pin(pgd_t *pgd)
{
	xen_mc_batch();

	if (pgd_walk(pgd, pin_page, TASK_SIZE)) {
		/* re-enable interrupts for kmap_flush_unused */
		xen_mc_issue(0);
		kmap_flush_unused();
		xen_mc_batch();
	}

	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
	xen_mc_issue(0);
}

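/*
 * The pinning sequence above is: queue RO remappings for every
 * pagetable page via pgd_walk(pin_page, ...), flush any unpinned
 * highmem kmaps that were found (which requires draining the batch
 * first), then ask Xen to validate and pin the whole tree through the
 * top-level pin op.
 */
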
/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 */
void xen_mm_pin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			xen_pgd_pin((pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static __init int mark_pinned(struct page *page, enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

void __init xen_mark_init_mm_pinned(void)
{
	pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
}

static int unpin_page(struct page *page, enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		if (level == PT_PTE) {
			ptl = lock_pte(page);

			xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(do_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void xen_pgd_unpin(pgd_t *pgd)
{
	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	pgd_walk(pgd, unpin_page, TASK_SIZE);

	xen_mc_issue(0);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	unsigned long flags;
	struct page *page;

	spin_lock_irqsave(&pgd_lock, flags);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			printk("unpinning pinned %p\n", page_address(page));
			xen_pgd_unpin((pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock_irqrestore(&pgd_lock, flags);
}

void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next->pgd);
	spin_unlock(&next->page_table_lock);
}

void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

#ifdef CONFIG_X86_64
	active_mm = read_pda(active_mm);
#else
	active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
#endif

	if (active_mm == mm)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
		load_cr3(swapper_pg_dir);
		arch_flush_lazy_cpu_mode();
	}
}

static void drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
		arch_flush_lazy_cpu_mode();
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
	}

	if (!cpus_empty(mask))
		smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
}
#else
static void drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces them to be read-only, and it controls all updates
 * to them.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin the old process's pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may still be using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (page_pinned(mm->pgd))
		xen_pgd_unpin(mm->pgd);

	spin_unlock(&mm->page_table_lock);
}