/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#define MMU_UPDATE_HISTO	30

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

#ifdef CONFIG_XEN_DEBUG_FS

static struct {
	u32 pgd_update;
	u32 pgd_update_pinned;
	u32 pgd_update_batched;

	u32 pud_update;
	u32 pud_update_pinned;
	u32 pud_update_batched;

	u32 pmd_update;
	u32 pmd_update_pinned;
	u32 pmd_update_batched;

	u32 pte_update;
	u32 pte_update_pinned;
	u32 pte_update_batched;

	u32 mmu_update;
	u32 mmu_update_extended;
	u32 mmu_update_histo[MMU_UPDATE_HISTO];

	u32 prot_commit;
	u32 prot_commit_batched;

	u32 set_pte_at;
	u32 set_pte_at_batched;
	u32 set_pte_at_pinned;
	u32 set_pte_at_current;
	u32 set_pte_at_kernel;
} mmu_stats;

static u8 zero_stats;

static inline void check_zero(void)
{
	if (unlikely(zero_stats)) {
		memset(&mmu_stats, 0, sizeof(mmu_stats));
		zero_stats = 0;
	}
}

#define ADD_STATS(elem, val)			\
	do { check_zero(); mmu_stats.elem += (val); } while (0)

#else  /* !CONFIG_XEN_DEBUG_FS */

#define ADD_STATS(elem, val)	do { (void)(val); } while (0)

#endif /* CONFIG_XEN_DEBUG_FS */


/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to map the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may still be lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		 /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */
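
/*
 * Illustrative sketch (hypothetical, not part of the build): per the
 * note above, code inspecting a *remote* vcpu's pagetable base must
 * use xen_current_cr3, since that vcpu's xen_cr3 may still be lazily
 * deferred.  The example_* helper here is made up for illustration.
 */
#if 0
static unsigned long example_remote_cr3(int cpu)
{
	return per_cpu(xen_current_cr3, cpu);	/* physaddr of its pgd */
}
#endif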


/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the address is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
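
/*
 * Illustrative sketch (hypothetical, not part of the build): the fast
 * path above covers linear-mapped lowmem; a vmalloc'd buffer fails
 * virt_addr_valid() and takes the full pagetable walk instead.  The
 * example_* helper here is made up for illustration.
 */
#if 0
static unsigned long example_vmalloc_mfn(void)
{
	void *buf = vmalloc(PAGE_SIZE);	/* not in the linear map */
	unsigned long mfn = 0;

	if (buf) {
		mfn = arbitrary_virt_to_mfn(buf);	/* slow path */
		vfree(buf);
	}
	return mfn;
}
#endif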

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}
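
/*
 * Illustrative sketch (hypothetical, not part of the build): these two
 * helpers pair up around a page's life as a pagetable page -- Xen only
 * accepts read-only frames as pagetables (see the header comment).
 * The example_* helper here is made up for illustration.
 */
#if 0
static void example_pagetable_page_lifetime(void *pt_page)
{
	make_lowmem_page_readonly(pt_page);
	/* ... page may now be installed and pinned as a pagetable ... */
	make_lowmem_page_readwrite(pt_page);	/* after it is unpinned */
}
#endif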


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

static bool xen_iomap_pte(pte_t pte)
{
	return pte_flags(pte) & _PAGE_IOMAP;
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = arbitrary_virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
{
	xen_set_domain_pte(ptep, pteval, DOMID_IO);
}
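
/*
 * Illustrative sketch (hypothetical, not part of the build): an I/O
 * frame mapped through the exported helper above.  The frame number is
 * already a machine frame, and DOMID_IO tells Xen it is I/O space.
 * The example_* helper is made up, and PAGE_KERNEL_IO is assumed to be
 * an appropriate protection here.
 */
#if 0
static void example_map_io_frame(pte_t *ptep, unsigned long mfn)
{
	xen_set_domain_pte(ptep, mfn_pte(mfn, PAGE_KERNEL_IO), DOMID_IO);
}
#endif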
static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		ADD_STATS(mmu_update_extended, 1);
		ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);

		mcs.mc->args[1]++;

		if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
			ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
		else
			ADD_STATS(mmu_update_histo[0], 1);
	} else {
		ADD_STATS(mmu_update, 1);
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
		ADD_STATS(mmu_update_histo[1], 1);
	}

	u = mcs.args;
	*u = *update;
}

void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	ADD_STATS(pmd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pmd_update_pinned, 1);

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given machine page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	if (xen_iomap_pte(pteval)) {
		xen_set_iomap_pte(ptep, pteval);
		goto out;
	}

	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:	return;
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	xen_mc_batch();

	u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	ADD_STATS(prot_commit, 1);
	ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

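/*
 * Illustrative sketch (hypothetical, not part of the build): the
 * start/commit pair above backs the generic ptep_modify_prot_*()
 * interface; committing with MMU_PT_UPDATE_PRESERVE_AD lets Xen keep
 * any accessed/dirty bits set concurrently.  The example_* helper is
 * made up for illustration.
 */
#if 0
static void example_modify_prot(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep)
{
	pte_t pte = xen_ptep_modify_prot_start(mm, addr, ptep);

	pte = pte_wrprotect(pte);	/* any protection change */
	xen_ptep_modify_prot_commit(mm, addr, ptep, pte);
}
#endif
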
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = get_phys_to_machine(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else {
			/*
			 * It is paramount to do this test _after_ the
			 * INVALID_P2M_ENTRY check, because
			 * INVALID_P2M_ENTRY & IDENTITY_FRAME_BIT also
			 * resolves to true.
			 */
			mfn &= ~FOREIGN_FRAME_BIT;
			if (mfn & IDENTITY_FRAME_BIT) {
				mfn &= ~IDENTITY_FRAME_BIT;
				flags |= _PAGE_IOMAP;
			}
		}
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}

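/*
 * Illustrative sketch (hypothetical, not part of the build): the round
 * trip described in the header comment.  pte_pfn_to_mfn() runs when a
 * pte is created, pte_mfn_to_pfn() when its value is read back, so a
 * pfn with a valid mfn should survive the conversion pair.  The
 * example_* helper is made up for illustration.
 */
#if 0
static void example_pfn_mfn_round_trip(unsigned long pfn)
{
	pteval_t val = ((pteval_t)pfn << PAGE_SHIFT) | _PAGE_PRESENT;

	val = pte_pfn_to_mfn(val);	/* as stored in the pagetable */
	val = pte_mfn_to_pfn(val);	/* as seen via __pte_val et al. */

	WARN_ON(((val & PTE_PFN_MASK) >> PAGE_SHIFT) != pfn);
}
#endif
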
static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		/* We assume the pte frame number is an MFN, so
		   just use it as-is. */
		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;

	/* If this is a WC pte, convert back from Xen WC to Linux WC */
	if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) {
		WARN_ON(!pat_enabled);
		pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT;
	}

	if (xen_initial_domain() && (pteval & _PAGE_IOMAP))
		return pteval;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

/*
 * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7
 * are reserved for now, to correspond to the Intel-reserved PAT
 * types.
 *
 * We expect Linux's PAT set as follows:
 *
 * Idx  PTE flags        Linux    Xen    Default
 * 0                     WB       WB     WB
 * 1            PWT      WC       WT     WT
 * 2        PCD          UC-      UC-    UC-
 * 3        PCD PWT      UC       UC     UC
 * 4    PAT              WB       WC     WB
 * 5    PAT     PWT      WC       WP     WT
 * 6    PAT PCD          UC-      UC     UC-
 * 7    PAT PCD PWT      UC       UC     UC
 */

void xen_set_pat(u64 pat)
{
	/* We expect Linux to use a PAT setting of
	 * UC UC- WC WB (ignoring the PAT flag) */
	WARN_ON(pat != 0x0007010600070106ull);
}

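/*
 * Illustrative sketch (hypothetical, not part of the build): decoding
 * the expected PAT MSR value checked above.  Byte i holds the x86
 * memory-type encoding for PAT entry i (0=UC, 1=WC, 4=WT, 5=WP, 6=WB,
 * 7=UC-), so 0x0007010600070106 reads, low byte first, as
 * WB WC UC- UC WB WC UC- UC for entries 0..7 -- i.e. the "UC UC- WC WB"
 * layout named in the comment.  The example_* helper is made up.
 */
#if 0
static void example_decode_pat(u64 pat)
{
	int i;

	for (i = 0; i < 8; i++)
		pr_info("PAT entry %d: encoding %#llx\n",
			i, (pat >> (i * 8)) & 0xff);
}
#endif
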
pte_t xen_make_pte(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);

	/* If Linux is trying to set a WC pte, then map to the Xen WC.
	 * If _PAGE_PAT is set, then it probably means it is really
	 * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope
	 * things work out OK...
	 *
	 * (We should never see kernel mappings with _PAGE_PSE set,
	 * but we could see hugetlbfs mappings, I think.)
	 */
	if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) {
		if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT)
			pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT;
	}

	/*
	 * Unprivileged domains are allowed to do IOMAPpings for
	 * PCI passthrough, but not map ISA space.  The ISA
	 * mappings are just dummy local mappings to keep other
	 * parts of the kernel happy.
	 */
	if (unlikely(pte & _PAGE_IOMAP) &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		pte = iomap_pte(pte);
	} else {
		pte &= ~_PAGE_IOMAP;
		pte = pte_pfn_to_mfn(pte);
	}

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

#ifdef CONFIG_XEN_DEBUG
pte_t xen_make_pte_debug(pteval_t pte)
{
	phys_addr_t addr = (pte & PTE_PFN_MASK);
	phys_addr_t other_addr;
	bool io_page = false;
	pte_t _pte;

	if (pte & _PAGE_IOMAP)
		io_page = true;

	_pte = xen_make_pte(pte);

	if (!addr)
		return _pte;

	if (io_page &&
	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
		other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
		WARN_ONCE(addr != other_addr,
			  "0x%lx is using VM_IO, but it is 0x%lx!\n",
			  (unsigned long)addr, (unsigned long)other_addr);
	} else {
		pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
		other_addr = (_pte.pte & PTE_PFN_MASK);
		WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
			  "0x%lx is missing VM_IO (and wasn't fixed)!\n",
			  (unsigned long)addr);
	}

	return _pte;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
#endif

pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

void xen_set_pud(pud_t *ptr, pud_t val)
{
	ADD_STATS(pud_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	ADD_STATS(pud_update_pinned, 1);

	xen_set_pud_hyper(ptr, val);
}

void xen_set_pte(pte_t *ptep, pte_t pte)
{
	if (xen_iomap_pte(pte)) {
		xen_set_iomap_pte(ptep, pte);
		return;
	}

	ADD_STATS(pte_update, 1);
//	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

#ifdef CONFIG_X86_PAE
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
#else
	*ptep = pte;
#endif
}

#ifdef CONFIG_X86_PAE
void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	if (xen_iomap_pte(pte)) {
		xen_set_iomap_pte(ptep, pte);
		return;
	}

	set_64bit((u64 *)ptep, native_pte_val(pte));
}

void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();		/* make sure low gets written first */
	ptep->pte_high = 0;
}

void xen_pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if PAGETABLE_LEVELS == 4
pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

| 715 | static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) |
Jeremy Fitzhardinge | f6e5873 | 2008-07-08 15:06:38 -0700 | [diff] [blame] | 716 | { |
| 717 | struct mmu_update u; |
| 718 | |
Jeremy Fitzhardinge | f6e5873 | 2008-07-08 15:06:38 -0700 | [diff] [blame] | 719 | u.ptr = virt_to_machine(ptr).maddr; |
| 720 | u.val = pgd_val_ma(val); |
Jeremy Fitzhardinge | 7708ad6 | 2008-08-19 13:34:22 -0700 | [diff] [blame] | 721 | xen_extend_mmu_update(&u); |
Jeremy Fitzhardinge | d6182fb | 2008-07-08 15:07:13 -0700 | [diff] [blame] | 722 | } |
| 723 | |
| 724 | /* |
| 725 | * Raw hypercall-based set_pgd, intended for in early boot before |
| 726 | * there's a page structure. This implies: |
| 727 | * 1. The only existing pagetable is the kernel's |
| 728 | * 2. It is always pinned |
| 729 | * 3. It has no user pagetable attached to it |
| 730 | */ |
| 731 | void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) |
| 732 | { |
| 733 | preempt_disable(); |
| 734 | |
| 735 | xen_mc_batch(); |
| 736 | |
| 737 | __xen_set_pgd_hyper(ptr, val); |
Jeremy Fitzhardinge | f6e5873 | 2008-07-08 15:06:38 -0700 | [diff] [blame] | 738 | |
| 739 | xen_mc_issue(PARAVIRT_LAZY_MMU); |
| 740 | |
| 741 | preempt_enable(); |
| 742 | } |
| 743 | |
void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	ADD_STATS(pgd_update, 1);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	ADD_STATS(pgd_update_pinned, 1);
	ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* PAGETABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit this
	 * ends up as a zero-sized hole and so is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

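/*
 * Illustrative sketch (hypothetical, not part of the build): a minimal
 * walker callback.  The pin/unpin callbacks below follow this shape;
 * returning non-zero is accumulated into the walker's flush result.
 * The example_* helpers are made up for illustration.
 */
#if 0
static int example_note_page(struct mm_struct *mm, struct page *page,
			     enum pt_level level)
{
	printk(KERN_DEBUG "pagetable page %p at level %d\n", page, level);

	return 0;	/* no flush needed */
}

static void example_walk(struct mm_struct *mm)
{
	xen_pgd_walk(mm, example_note_page, STACK_TOP_MAX);
}
#endif
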
/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTLOCKS
	ptl = __pte_lockptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	mcs = __xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = level;
	op->arg1.mfn = pfn_to_mfn(pfn);
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

| 982 | /* This is called just after a mm has been created, but it has not |
| 983 | been used yet. We need to make sure that its pagetable is all |
| 984 | read-only, and can be pinned. */ |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 985 | static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 986 | { |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 987 | xen_mc_batch(); |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 988 | |
Ian Campbell | 86bbc2c | 2008-11-21 10:21:33 +0000 | [diff] [blame] | 989 | if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) { |
Jeremy Fitzhardinge | d05fdf3 | 2008-10-28 19:23:06 +1100 | [diff] [blame] | 990 | /* re-enable interrupts for flushing */ |
Jeremy Fitzhardinge | f87e4ca | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 991 | xen_mc_issue(0); |
Jeremy Fitzhardinge | d05fdf3 | 2008-10-28 19:23:06 +1100 | [diff] [blame] | 992 | |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 993 | kmap_flush_unused(); |
Jeremy Fitzhardinge | d05fdf3 | 2008-10-28 19:23:06 +1100 | [diff] [blame] | 994 | |
Jeremy Fitzhardinge | f87e4ca | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 995 | xen_mc_batch(); |
| 996 | } |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 997 | |
Jeremy Fitzhardinge | d6182fb | 2008-07-08 15:07:13 -0700 | [diff] [blame] | 998 | #ifdef CONFIG_X86_64 |
| 999 | { |
| 1000 | pgd_t *user_pgd = xen_get_user_pgd(pgd); |
| 1001 | |
| 1002 | xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd))); |
| 1003 | |
| 1004 | if (user_pgd) { |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1005 | xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD); |
Tej | f63c2f2 | 2008-12-16 11:56:06 -0800 | [diff] [blame] | 1006 | xen_do_pin(MMUEXT_PIN_L4_TABLE, |
| 1007 | PFN_DOWN(__pa(user_pgd))); |
Jeremy Fitzhardinge | d6182fb | 2008-07-08 15:07:13 -0700 | [diff] [blame] | 1008 | } |
| 1009 | } |
| 1010 | #else /* CONFIG_X86_32 */ |
Jeremy Fitzhardinge | 5deb30d | 2008-07-08 15:07:06 -0700 | [diff] [blame] | 1011 | #ifdef CONFIG_X86_PAE |
| 1012 | /* Need to make sure unshared kernel PMD is pinnable */ |
Jeremy Fitzhardinge | 47cb2ed | 2008-11-06 13:48:24 -0800 | [diff] [blame] | 1013 | xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1014 | PT_PMD); |
Jeremy Fitzhardinge | 5deb30d | 2008-07-08 15:07:06 -0700 | [diff] [blame] | 1015 | #endif |
Jeremy Fitzhardinge | 2849914 | 2008-05-09 12:05:57 +0100 | [diff] [blame] | 1016 | xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd))); |
Jeremy Fitzhardinge | d6182fb | 2008-07-08 15:07:13 -0700 | [diff] [blame] | 1017 | #endif /* CONFIG_X86_64 */ |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1018 | xen_mc_issue(0); |
| 1019 | } |
| 1020 | |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1021 | static void xen_pgd_pin(struct mm_struct *mm) |
| 1022 | { |
| 1023 | __xen_pgd_pin(mm, mm->pgd); |
| 1024 | } |
| 1025 | |
Jeremy Fitzhardinge | 0e91398 | 2008-05-26 23:31:27 +0100 | [diff] [blame] | 1026 | /* |
| 1027 | * On save, we need to pin all pagetables to make sure they get their |
| 1028 | * mfns turned into pfns. Search the list for any unpinned pgds and pin |
| 1029 | * them (unpinned pgds are not currently in use, probably because the |
| 1030 | * process is under construction or destruction). |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1031 | * |
| 1032 | * Expected to be called in stop_machine() ("equivalent to taking |
| 1033 | * every spinlock in the system"), so the locking doesn't really |
| 1034 | * matter all that much. |
Jeremy Fitzhardinge | 0e91398 | 2008-05-26 23:31:27 +0100 | [diff] [blame] | 1035 | */ |
| 1036 | void xen_mm_pin_all(void) |
| 1037 | { |
Jeremy Fitzhardinge | 0e91398 | 2008-05-26 23:31:27 +0100 | [diff] [blame] | 1038 | struct page *page; |
| 1039 | |
Andrea Arcangeli | a79e53d | 2011-02-16 15:45:22 -0800 | [diff] [blame] | 1040 | spin_lock(&pgd_lock); |
Jeremy Fitzhardinge | 0e91398 | 2008-05-26 23:31:27 +0100 | [diff] [blame] | 1041 | |
| 1042 | list_for_each_entry(page, &pgd_list, lru) { |
| 1043 | if (!PagePinned(page)) { |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1044 | __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page)); |
Jeremy Fitzhardinge | 0e91398 | 2008-05-26 23:31:27 +0100 | [diff] [blame] | 1045 | SetPageSavePinned(page); |
| 1046 | } |
| 1047 | } |
| 1048 | |
Andrea Arcangeli | a79e53d | 2011-02-16 15:45:22 -0800 | [diff] [blame] | 1049 | spin_unlock(&pgd_lock); |
Jeremy Fitzhardinge | 0e91398 | 2008-05-26 23:31:27 +0100 | [diff] [blame] | 1050 | } |
| 1051 | |
Eduardo Habkost | c1f2f09 | 2008-07-08 15:06:24 -0700 | [diff] [blame] | 1052 | /* |
| 1053 | * The init_mm pagetable is really pinned as soon as it's created, but |
| 1054 | * that's before we have page structures to store the bits. So do all |
| 1055 | * the book-keeping now. |
| 1056 | */ |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1057 | static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page, |
| 1058 | enum pt_level level) |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1059 | { |
| 1060 | SetPagePinned(page); |
| 1061 | return 0; |
| 1062 | } |
| 1063 | |
Jeremy Fitzhardinge | b96229b | 2009-03-17 13:30:55 -0700 | [diff] [blame] | 1064 | static void __init xen_mark_init_mm_pinned(void) |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1065 | { |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1066 | xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1067 | } |
| 1068 | |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1069 | static int xen_unpin_page(struct mm_struct *mm, struct page *page, |
| 1070 | enum pt_level level) |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1071 | { |
Christoph Lameter | d60cd46 | 2008-04-28 02:12:51 -0700 | [diff] [blame] | 1072 | unsigned pgfl = TestClearPagePinned(page); |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1073 | |
| 1074 | if (pgfl && !PageHighMem(page)) { |
| 1075 | void *pt = lowmem_page_address(page); |
| 1076 | unsigned long pfn = page_to_pfn(page); |
Jeremy Fitzhardinge | 7426071 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 1077 | spinlock_t *ptl = NULL; |
| 1078 | struct multicall_space mcs; |
| 1079 | |
Jeremy Fitzhardinge | 11ad93e | 2008-08-19 13:32:51 -0700 | [diff] [blame] | 1080 | /* |
| 1081 | * Do the converse to pin_page. If we're using split |
| 1082 | * pte locks, we must hold the lock while |
| 1083 | * the pte page is unpinned but still RO to prevent |
| 1084 | * concurrent updates from seeing it in this |
| 1085 | * partially-pinned state. |
| 1086 | */ |
Jeremy Fitzhardinge | 7426071 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 1087 | if (level == PT_PTE) { |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1088 | ptl = xen_pte_lock(page, mm); |
Jeremy Fitzhardinge | 7426071 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 1089 | |
Jeremy Fitzhardinge | 11ad93e | 2008-08-19 13:32:51 -0700 | [diff] [blame] | 1090 | if (ptl) |
| 1091 | xen_do_pin(MMUEXT_UNPIN_TABLE, pfn); |
Jeremy Fitzhardinge | 7426071 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 1092 | } |
| 1093 | |
| 1094 | mcs = __xen_mc_entry(0); |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1095 | |
| 1096 | MULTI_update_va_mapping(mcs.mc, (unsigned long)pt, |
| 1097 | pfn_pte(pfn, PAGE_KERNEL), |
Jeremy Fitzhardinge | 7426071 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 1098 | level == PT_PGD ? UVMF_TLB_FLUSH : 0); |
| 1099 | |
| 1100 | if (ptl) { |
| 1101 | /* unlock when batch completed */ |
Jeremy Fitzhardinge | 7708ad6 | 2008-08-19 13:34:22 -0700 | [diff] [blame] | 1102 | xen_mc_callback(xen_pte_unlock, ptl); |
Jeremy Fitzhardinge | 7426071 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 1103 | } |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1104 | } |
| 1105 | |
| 1106 | return 0; /* never need to flush on unpin */ |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1107 | } |
| 1108 | |
| 1109 | /* Release a pagetable's pages back to normal RW */ |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1110 | static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1111 | { |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1112 | xen_mc_batch(); |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1113 | |
Jeremy Fitzhardinge | 7426071 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 1114 | xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1115 | |
Jeremy Fitzhardinge | d6182fb | 2008-07-08 15:07:13 -0700 | [diff] [blame] | 1116 | #ifdef CONFIG_X86_64 |
| 1117 | { |
| 1118 | pgd_t *user_pgd = xen_get_user_pgd(pgd); |
| 1119 | |
| 1120 | if (user_pgd) { |
Tej | f63c2f2 | 2008-12-16 11:56:06 -0800 | [diff] [blame] | 1121 | xen_do_pin(MMUEXT_UNPIN_TABLE, |
| 1122 | PFN_DOWN(__pa(user_pgd))); |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1123 | xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD); |
Jeremy Fitzhardinge | d6182fb | 2008-07-08 15:07:13 -0700 | [diff] [blame] | 1124 | } |
| 1125 | } |
| 1126 | #endif |
| 1127 | |
Jeremy Fitzhardinge | 5deb30d | 2008-07-08 15:07:06 -0700 | [diff] [blame] | 1128 | #ifdef CONFIG_X86_PAE |
| 1129 | /* Need to make sure unshared kernel PMD is unpinned */ |
Jeremy Fitzhardinge | 47cb2ed | 2008-11-06 13:48:24 -0800 | [diff] [blame] | 1130 | xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1131 | PT_PMD); |
Jeremy Fitzhardinge | 5deb30d | 2008-07-08 15:07:06 -0700 | [diff] [blame] | 1132 | #endif |
Jeremy Fitzhardinge | d6182fb | 2008-07-08 15:07:13 -0700 | [diff] [blame] | 1133 | |
Ian Campbell | 86bbc2c | 2008-11-21 10:21:33 +0000 | [diff] [blame] | 1134 | __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT); |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1135 | |
| 1136 | xen_mc_issue(0); |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1137 | } |
| 1138 | |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1139 | static void xen_pgd_unpin(struct mm_struct *mm) |
| 1140 | { |
| 1141 | __xen_pgd_unpin(mm, mm->pgd); |
| 1142 | } |
| 1143 | |
Jeremy Fitzhardinge | 0e91398 | 2008-05-26 23:31:27 +0100 | [diff] [blame] | 1144 | /* |
| 1145 | * On resume, undo any pinning done at save, so that the rest of the |
| 1146 | * kernel doesn't see any unexpected pinned pagetables. |
| 1147 | */ |
| 1148 | void xen_mm_unpin_all(void) |
| 1149 | { |
Jeremy Fitzhardinge | 0e91398 | 2008-05-26 23:31:27 +0100 | [diff] [blame] | 1150 | struct page *page; |
| 1151 | |
Andrea Arcangeli | a79e53d | 2011-02-16 15:45:22 -0800 | [diff] [blame] | 1152 | spin_lock(&pgd_lock); |
Jeremy Fitzhardinge | 0e91398 | 2008-05-26 23:31:27 +0100 | [diff] [blame] | 1153 | |
| 1154 | list_for_each_entry(page, &pgd_list, lru) { |
| 1155 | if (PageSavePinned(page)) { |
| 1156 | BUG_ON(!PagePinned(page)); |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1157 | __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page)); |
Jeremy Fitzhardinge | 0e91398 | 2008-05-26 23:31:27 +0100 | [diff] [blame] | 1158 | ClearPageSavePinned(page); |
| 1159 | } |
| 1160 | } |
| 1161 | |
Andrea Arcangeli | a79e53d | 2011-02-16 15:45:22 -0800 | [diff] [blame] | 1162 | spin_unlock(&pgd_lock); |
Jeremy Fitzhardinge | 0e91398 | 2008-05-26 23:31:27 +0100 | [diff] [blame] | 1163 | } |
| 1164 | |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1165 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) |
| 1166 | { |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1167 | spin_lock(&next->page_table_lock); |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1168 | xen_pgd_pin(next); |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1169 | spin_unlock(&next->page_table_lock); |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1170 | } |
| 1171 | |
| 1172 | void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) |
| 1173 | { |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1174 | spin_lock(&mm->page_table_lock); |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1175 | xen_pgd_pin(mm); |
Jeremy Fitzhardinge | f4f97b3 | 2007-07-17 18:37:05 -0700 | [diff] [blame] | 1176 | spin_unlock(&mm->page_table_lock); |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1177 | } |
| 1178 | |
Jeremy Fitzhardinge | f87e4ca | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1179 | |
| 1180 | #ifdef CONFIG_SMP |
| 1181 | /* Another cpu may still have their %cr3 pointing at the pagetable, so |
| 1182 | we need to repoint it somewhere else before we can unpin it. */ |
| 1183 | static void drop_other_mm_ref(void *info) |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1184 | { |
Jeremy Fitzhardinge | f87e4ca | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1185 | struct mm_struct *mm = info; |
Jeremy Fitzhardinge | ce87b3d | 2008-07-08 15:06:40 -0700 | [diff] [blame] | 1186 | struct mm_struct *active_mm; |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1187 | |
Brian Gerst | 9eb912d | 2009-01-19 00:38:57 +0900 | [diff] [blame] | 1188 | active_mm = percpu_read(cpu_tlbstate.active_mm); |
Jeremy Fitzhardinge | ce87b3d | 2008-07-08 15:06:40 -0700 | [diff] [blame] | 1189 | |
| 1190 | if (active_mm == mm) |
Jeremy Fitzhardinge | f87e4ca | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1191 | leave_mm(smp_processor_id()); |
Jeremy Fitzhardinge | 9f79991 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 1192 | |
| 1193 | /* If this cpu still has a stale cr3 reference, then make sure |
| 1194 | it has been flushed. */ |
Jeremy Fitzhardinge | 7fd7d83 | 2009-02-17 23:24:03 -0800 | [diff] [blame] | 1195 | if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) |
Jeremy Fitzhardinge | 9f79991 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 1196 | load_cr3(swapper_pg_dir); |
Jeremy Fitzhardinge | f87e4ca | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1197 | } |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1198 | |
Jeremy Fitzhardinge | 7708ad6 | 2008-08-19 13:34:22 -0700 | [diff] [blame] | 1199 | static void xen_drop_mm_ref(struct mm_struct *mm) |
Jeremy Fitzhardinge | f87e4ca | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1200 | { |
Mike Travis | e4d9820 | 2008-12-16 17:34:05 -0800 | [diff] [blame] | 1201 | cpumask_var_t mask; |
Jeremy Fitzhardinge | 9f79991 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 1202 | unsigned cpu; |
| 1203 | |
Jeremy Fitzhardinge | f87e4ca | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1204 | if (current->active_mm == mm) { |
| 1205 | if (current->mm == mm) |
| 1206 | load_cr3(swapper_pg_dir); |
| 1207 | else |
| 1208 | leave_mm(smp_processor_id()); |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1209 | } |
| 1210 | |
Jeremy Fitzhardinge | 9f79991 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 1211 | /* Get the "official" set of cpus referring to our pagetable. */ |
Mike Travis | e4d9820 | 2008-12-16 17:34:05 -0800 | [diff] [blame] | 1212 | if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) { |
| 1213 | for_each_online_cpu(cpu) { |
Rusty Russell | 78f1c4d | 2009-09-24 09:34:51 -0600 | [diff] [blame] | 1214 | if (!cpumask_test_cpu(cpu, mm_cpumask(mm)) |
Mike Travis | e4d9820 | 2008-12-16 17:34:05 -0800 | [diff] [blame] | 1215 | && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd)) |
| 1216 | continue; |
| 1217 | smp_call_function_single(cpu, drop_other_mm_ref, mm, 1); |
| 1218 | } |
| 1219 | return; |
| 1220 | } |
Rusty Russell | 78f1c4d | 2009-09-24 09:34:51 -0600 | [diff] [blame] | 1221 | cpumask_copy(mask, mm_cpumask(mm)); |
Jeremy Fitzhardinge | 9f79991 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 1222 | |
| 1223 | /* It's possible that a vcpu may have a stale reference to our |
| 1224 | cr3, because it's in lazy mode, and it hasn't yet flushed |
| 1225 | its set of pending hypercalls. In this case, we can |
| 1226 | look at its actual current cr3 value, and force it to flush |
| 1227 | if needed. */ |
| 1228 | for_each_online_cpu(cpu) { |
| 1229 | if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) |
Mike Travis | e4d9820 | 2008-12-16 17:34:05 -0800 | [diff] [blame] | 1230 | cpumask_set_cpu(cpu, mask); |
Jeremy Fitzhardinge | 9f79991 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 1231 | } |
| 1232 | |
Mike Travis | e4d9820 | 2008-12-16 17:34:05 -0800 | [diff] [blame] | 1233 | if (!cpumask_empty(mask)) |
| 1234 | smp_call_function_many(mask, drop_other_mm_ref, mm, 1); |
| 1235 | free_cpumask_var(mask); |
Jeremy Fitzhardinge | f87e4ca | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1236 | } |
| 1237 | #else |
Jeremy Fitzhardinge | 7708ad6 | 2008-08-19 13:34:22 -0700 | [diff] [blame] | 1238 | static void xen_drop_mm_ref(struct mm_struct *mm) |
Jeremy Fitzhardinge | f87e4ca | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1239 | { |
| 1240 | if (current->active_mm == mm) |
| 1241 | load_cr3(swapper_pg_dir); |
| 1242 | } |
| 1243 | #endif |
| 1244 | |
| 1245 | /* |
| 1246 | * While a process runs, Xen pins its pagetables, which means that the |
| 1247 | * hypervisor forces them to be read-only and controls all updates |
| 1248 | * to them. This means that all pagetable updates have to go via the |
| 1249 | * hypervisor, which is moderately expensive. |
| 1250 | * |
| 1251 | * Since we're pulling the pagetable down, we switch to init_mm, |
| 1252 | * unpin the old process's pagetable and mark it all read-write, which |
| 1253 | * allows further operations on it to be simple memory accesses. |
| 1254 | * |
| 1255 | * The only subtle point is that another CPU may still be using the |
| 1256 | * pagetable because of lazy tlb flushing. This means we need to |
| 1257 | * switch all CPUs off this pagetable before we can unpin it. |
| 1258 | */ |
| 1259 | void xen_exit_mmap(struct mm_struct *mm) |
| 1260 | { |
| 1261 | get_cpu(); /* make sure we don't move around */ |
Jeremy Fitzhardinge | 7708ad6 | 2008-08-19 13:34:22 -0700 | [diff] [blame] | 1262 | xen_drop_mm_ref(mm); |
Jeremy Fitzhardinge | f87e4ca | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1263 | put_cpu(); |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1264 | |
Jeremy Fitzhardinge | f120f13 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1265 | spin_lock(&mm->page_table_lock); |
Jeremy Fitzhardinge | df912ea | 2007-09-25 11:50:00 -0700 | [diff] [blame] | 1266 | |
| 1267 | /* pgd may not be pinned in the error exit path of execve */ |
Jeremy Fitzhardinge | 7708ad6 | 2008-08-19 13:34:22 -0700 | [diff] [blame] | 1268 | if (xen_page_pinned(mm->pgd)) |
Jeremy Fitzhardinge | eefb47f | 2008-10-08 13:01:39 -0700 | [diff] [blame] | 1269 | xen_pgd_unpin(mm); |
Jeremy Fitzhardinge | 7426071 | 2007-10-16 11:51:30 -0700 | [diff] [blame] | 1270 | |
Jeremy Fitzhardinge | f120f13 | 2007-07-17 18:37:06 -0700 | [diff] [blame] | 1271 | spin_unlock(&mm->page_table_lock); |
Jeremy Fitzhardinge | 3b827c1 | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1272 | } |
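| | |
| | /* |
| | * For reference, a simplified sketch of a process pagetable's pin |
| | * state over its lifetime, in terms of the hooks in this file (the |
| | * callers are in generic mm code): |
| | * |
| | *	xen_pgd_alloc()				pgd allocated, RW, unpinned |
| | *	xen_dup_mmap()/xen_activate_mm()	pinned: pages RO, updates via Xen |
| | *	xen_exit_mmap()				unpinned: plain RW memory again |
| | *	xen_pgd_free()				pgd freed |
| | */ |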
Jeremy Fitzhardinge | 994025c | 2008-08-20 17:02:19 -0700 | [diff] [blame] | 1273 | |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1274 | static __init void xen_pagetable_setup_start(pgd_t *base) |
| 1275 | { |
| 1276 | } |
| 1277 | |
Thomas Gleixner | f1d7062 | 2009-08-20 13:13:52 +0200 | [diff] [blame] | 1278 | static void xen_post_allocator_init(void); |
| 1279 | |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1280 | static __init void xen_pagetable_setup_done(pgd_t *base) |
| 1281 | { |
| 1282 | xen_setup_shared_info(); |
Thomas Gleixner | f1d7062 | 2009-08-20 13:13:52 +0200 | [diff] [blame] | 1283 | xen_post_allocator_init(); |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1284 | } |
| 1285 | |
| 1286 | static void xen_write_cr2(unsigned long cr2) |
| 1287 | { |
| 1288 | percpu_read(xen_vcpu)->arch.cr2 = cr2; |
| 1289 | } |
| 1290 | |
| 1291 | static unsigned long xen_read_cr2(void) |
| 1292 | { |
| 1293 | return percpu_read(xen_vcpu)->arch.cr2; |
| 1294 | } |
| 1295 | |
| 1296 | unsigned long xen_read_cr2_direct(void) |
| 1297 | { |
| 1298 | return percpu_read(xen_vcpu_info.arch.cr2); |
| 1299 | } |
| 1300 | |
| 1301 | static void xen_flush_tlb(void) |
| 1302 | { |
| 1303 | struct mmuext_op *op; |
| 1304 | struct multicall_space mcs; |
| 1305 | |
| 1306 | preempt_disable(); |
| 1307 | |
| 1308 | mcs = xen_mc_entry(sizeof(*op)); |
| 1309 | |
| 1310 | op = mcs.args; |
| 1311 | op->cmd = MMUEXT_TLB_FLUSH_LOCAL; |
| 1312 | MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); |
| 1313 | |
| 1314 | xen_mc_issue(PARAVIRT_LAZY_MMU); |
| 1315 | |
| 1316 | preempt_enable(); |
| 1317 | } |
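| | |
| | /* |
| | * Behaviour sketch for the PARAVIRT_LAZY_MMU argument: if this vcpu |
| | * is in lazy MMU mode, xen_mc_issue() leaves the batch pending and |
| | * the flush is deferred until the lazy region ends; otherwise it is |
| | * flushed to the hypervisor immediately.  A hypothetical caller: |
| | * |
| | *	arch_enter_lazy_mmu_mode(); |
| | *	xen_flush_tlb();		queued, not yet issued |
| | *	arch_leave_lazy_mmu_mode();	batch flushed here |
| | */ |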
| 1318 | |
| 1319 | static void xen_flush_tlb_single(unsigned long addr) |
| 1320 | { |
| 1321 | struct mmuext_op *op; |
| 1322 | struct multicall_space mcs; |
| 1323 | |
| 1324 | preempt_disable(); |
| 1325 | |
| 1326 | mcs = xen_mc_entry(sizeof(*op)); |
| 1327 | op = mcs.args; |
| 1328 | op->cmd = MMUEXT_INVLPG_LOCAL; |
| 1329 | op->arg1.linear_addr = addr & PAGE_MASK; |
| 1330 | MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); |
| 1331 | |
| 1332 | xen_mc_issue(PARAVIRT_LAZY_MMU); |
| 1333 | |
| 1334 | preempt_enable(); |
| 1335 | } |
| 1336 | |
| 1337 | static void xen_flush_tlb_others(const struct cpumask *cpus, |
| 1338 | struct mm_struct *mm, unsigned long va) |
| 1339 | { |
| 1340 | struct { |
| 1341 | struct mmuext_op op; |
| 1342 | DECLARE_BITMAP(mask, NR_CPUS); |
| 1343 | } *args; |
| 1344 | struct multicall_space mcs; |
| 1345 | |
Jeremy Fitzhardinge | e3f8a74 | 2009-03-04 17:36:57 -0800 | [diff] [blame] | 1346 | if (cpumask_empty(cpus)) |
| 1347 | return; /* nothing to do */ |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1348 | |
| 1349 | mcs = xen_mc_entry(sizeof(*args)); |
| 1350 | args = mcs.args; |
| 1351 | args->op.arg2.vcpumask = to_cpumask(args->mask); |
| 1352 | |
| 1353 | /* Remove us, and any offline CPUs. */ |
| 1354 | cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask); |
| 1355 | cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1356 | |
| 1357 | if (va == TLB_FLUSH_ALL) { |
| 1358 | args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; |
| 1359 | } else { |
| 1360 | args->op.cmd = MMUEXT_INVLPG_MULTI; |
| 1361 | args->op.arg1.linear_addr = va; |
| 1362 | } |
| 1363 | |
| 1364 | MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF); |
| 1365 | |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1366 | xen_mc_issue(PARAVIRT_LAZY_MMU); |
| 1367 | } |
| 1368 | |
| 1369 | static unsigned long xen_read_cr3(void) |
| 1370 | { |
| 1371 | return percpu_read(xen_cr3); |
| 1372 | } |
| 1373 | |
| 1374 | static void set_current_cr3(void *v) |
| 1375 | { |
| 1376 | percpu_write(xen_current_cr3, (unsigned long)v); |
| 1377 | } |
| 1378 | |
| 1379 | static void __xen_write_cr3(bool kernel, unsigned long cr3) |
| 1380 | { |
| 1381 | struct mmuext_op *op; |
| 1382 | struct multicall_space mcs; |
| 1383 | unsigned long mfn; |
| 1384 | |
| 1385 | if (cr3) |
| 1386 | mfn = pfn_to_mfn(PFN_DOWN(cr3)); |
| 1387 | else |
| 1388 | mfn = 0; |
| 1389 | |
| 1390 | WARN_ON(mfn == 0 && kernel); |
| 1391 | |
| 1392 | mcs = __xen_mc_entry(sizeof(*op)); |
| 1393 | |
| 1394 | op = mcs.args; |
| 1395 | op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR; |
| 1396 | op->arg1.mfn = mfn; |
| 1397 | |
| 1398 | MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); |
| 1399 | |
| 1400 | if (kernel) { |
| 1401 | percpu_write(xen_cr3, cr3); |
| 1402 | |
| 1403 | /* Update xen_current_cr3 once the batch has actually |
| 1404 | been submitted. */ |
| 1405 | xen_mc_callback(set_current_cr3, (void *)cr3); |
| 1406 | } |
| 1407 | } |
| 1408 | |
| 1409 | static void xen_write_cr3(unsigned long cr3) |
| 1410 | { |
| 1411 | BUG_ON(preemptible()); |
| 1412 | |
| 1413 | xen_mc_batch(); /* disables interrupts */ |
| 1414 | |
| 1415 | /* Update while interrupts are disabled, so it's atomic with |
| 1416 | respect to ipis */ |
| 1417 | percpu_write(xen_cr3, cr3); |
| 1418 | |
| 1419 | __xen_write_cr3(true, cr3); |
| 1420 | |
| 1421 | #ifdef CONFIG_X86_64 |
| 1422 | { |
| 1423 | pgd_t *user_pgd = xen_get_user_pgd(__va(cr3)); |
| 1424 | if (user_pgd) |
| 1425 | __xen_write_cr3(false, __pa(user_pgd)); |
| 1426 | else |
| 1427 | __xen_write_cr3(false, 0); |
| 1428 | } |
| 1429 | #endif |
| 1430 | |
| 1431 | xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ |
| 1432 | } |
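| | |
| | /* |
| | * On x86_64 each process has two L4s under Xen: the kernel pagetable |
| | * (MMUEXT_NEW_BASEPTR) and the user one (MMUEXT_NEW_USER_BASEPTR). |
| | * Both writes above are queued in one multicall batch, so the |
| | * hypervisor switches them together when the batch is issued. |
| | */ |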
| 1433 | |
| 1434 | static int xen_pgd_alloc(struct mm_struct *mm) |
| 1435 | { |
| 1436 | pgd_t *pgd = mm->pgd; |
| 1437 | int ret = 0; |
| 1438 | |
| 1439 | BUG_ON(PagePinned(virt_to_page(pgd))); |
| 1440 | |
| 1441 | #ifdef CONFIG_X86_64 |
| 1442 | { |
| 1443 | struct page *page = virt_to_page(pgd); |
| 1444 | pgd_t *user_pgd; |
| 1445 | |
| 1446 | BUG_ON(page->private != 0); |
| 1447 | |
| 1448 | ret = -ENOMEM; |
| 1449 | |
| 1450 | user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); |
| 1451 | page->private = (unsigned long)user_pgd; |
| 1452 | |
| 1453 | if (user_pgd != NULL) { |
| 1454 | user_pgd[pgd_index(VSYSCALL_START)] = |
| 1455 | __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE); |
| 1456 | ret = 0; |
| 1457 | } |
| 1458 | |
| 1459 | BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); |
| 1460 | } |
| 1461 | #endif |
| 1462 | |
| 1463 | return ret; |
| 1464 | } |
| 1465 | |
| 1466 | static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) |
| 1467 | { |
| 1468 | #ifdef CONFIG_X86_64 |
| 1469 | pgd_t *user_pgd = xen_get_user_pgd(pgd); |
| 1470 | |
| 1471 | if (user_pgd) |
| 1472 | free_page((unsigned long)user_pgd); |
| 1473 | #endif |
| 1474 | } |
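| | |
| | /* |
| | * Rough shape of the lookup the two helpers above rely on (the real |
| | * xen_get_user_pgd() is defined earlier in this file; this is only |
| | * an approximation for reference): |
| | * |
| | *	user_pgd = (pgd_t *)virt_to_page(pgd)->private; |
| | * |
| | * i.e. the user-mode L4 allocated in xen_pgd_alloc() is stashed in |
| | * the kernel pgd page's ->private field. |
| | */ |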
| 1475 | |
Stefano Stabellini | ee17645 | 2011-04-19 14:47:31 +0100 | [diff] [blame^] | 1476 | #ifdef CONFIG_X86_32 |
Jeremy Fitzhardinge | 1f4f931 | 2009-02-02 13:58:06 -0800 | [diff] [blame] | 1477 | static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) |
| 1478 | { |
| 1479 | /* If there's an existing pte, then don't allow _PAGE_RW to be set; |
| | the mask built below is all-ones when *ptep is already writable |
| | (so the new pte passes through unchanged), and ~_PAGE_RW when it |
| | isn't (so the new pte is forced read-only). */ |
| 1480 | if (pte_val_ma(*ptep) & _PAGE_PRESENT) |
| 1481 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & |
| 1482 | pte_val_ma(pte)); |
Stefano Stabellini | ee17645 | 2011-04-19 14:47:31 +0100 | [diff] [blame^] | 1483 | |
| 1484 | return pte; |
| 1485 | } |
| 1486 | #else /* CONFIG_X86_64 */ |
| 1487 | static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte) |
| 1488 | { |
| 1489 | unsigned long pfn = pte_pfn(pte); |
Jeremy Fitzhardinge | fef5ba7 | 2010-10-13 16:02:24 -0700 | [diff] [blame] | 1490 | |
| 1491 | /* |
| 1492 | * If the new pfn is within the range of the newly allocated |
| 1493 | * kernel pagetable, and it isn't being mapped into an |
Stefano Stabellini | d8aa5ec | 2011-03-09 14:22:05 +0000 | [diff] [blame] | 1494 | * early_ioremap fixmap slot as a freshly allocated page, make sure |
| 1495 | * it is RO. |
Jeremy Fitzhardinge | fef5ba7 | 2010-10-13 16:02:24 -0700 | [diff] [blame] | 1496 | */ |
Stefano Stabellini | d8aa5ec | 2011-03-09 14:22:05 +0000 | [diff] [blame] | 1497 | if ((!is_early_ioremap_ptep(ptep) && |
| 1498 | pfn >= pgt_buf_start && pfn < pgt_buf_end) || |
| 1499 | (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1))) |
Jeremy Fitzhardinge | fef5ba7 | 2010-10-13 16:02:24 -0700 | [diff] [blame] | 1500 | pte = pte_wrprotect(pte); |
Jeremy Fitzhardinge | 1f4f931 | 2009-02-02 13:58:06 -0800 | [diff] [blame] | 1501 | |
| 1502 | return pte; |
| 1503 | } |
Stefano Stabellini | ee17645 | 2011-04-19 14:47:31 +0100 | [diff] [blame^] | 1504 | #endif /* CONFIG_X86_64 */ |
Jeremy Fitzhardinge | 1f4f931 | 2009-02-02 13:58:06 -0800 | [diff] [blame] | 1505 | |
| 1506 | /* Init-time set_pte while constructing initial pagetables, which |
| 1507 | doesn't allow RO pagetable pages to be remapped RW */ |
| 1508 | static __init void xen_set_pte_init(pte_t *ptep, pte_t pte) |
| 1509 | { |
| 1510 | pte = mask_rw_pte(ptep, pte); |
| 1511 | |
| 1512 | xen_set_pte(ptep, pte); |
| 1513 | } |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1514 | |
Jeremy Fitzhardinge | b96229b | 2009-03-17 13:30:55 -0700 | [diff] [blame] | 1515 | static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) |
| 1516 | { |
| 1517 | struct mmuext_op op; |
| 1518 | op.cmd = cmd; |
| 1519 | op.arg1.mfn = pfn_to_mfn(pfn); |
| 1520 | if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) |
| 1521 | BUG(); |
| 1522 | } |
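| | |
| | /* |
| | * Unlike xen_do_pin() above, which queues its op in a multicall |
| | * batch, pin_pagetable_pfn() issues the mmuext op synchronously. |
| | * That suits the early-boot paths below, where no batch is in |
| | * flight and any failure is fatal anyway. |
| | */ |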
| 1523 | |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1524 | /* Early in boot, while setting up the initial pagetable, assume |
| 1525 | everything is pinned. */ |
| 1526 | static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) |
| 1527 | { |
| 1528 | #ifdef CONFIG_FLATMEM |
| 1529 | BUG_ON(mem_map); /* should only be used early */ |
| 1530 | #endif |
| 1531 | make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); |
Jeremy Fitzhardinge | b96229b | 2009-03-17 13:30:55 -0700 | [diff] [blame] | 1532 | pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); |
| 1533 | } |
| 1534 | |
| 1535 | /* Used for pmd and pud */ |
| 1536 | static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) |
| 1537 | { |
| 1538 | #ifdef CONFIG_FLATMEM |
| 1539 | BUG_ON(mem_map); /* should only be used early */ |
| 1540 | #endif |
| 1541 | make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1542 | } |
| 1543 | |
| 1544 | /* Early release_pte assumes that all pte pages are pinned, since there's |
| 1545 | only init_mm and anything attached to that is pinned. */ |
Jeremy Fitzhardinge | b96229b | 2009-03-17 13:30:55 -0700 | [diff] [blame] | 1546 | static __init void xen_release_pte_init(unsigned long pfn) |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1547 | { |
Jeremy Fitzhardinge | b96229b | 2009-03-17 13:30:55 -0700 | [diff] [blame] | 1548 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1549 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
| 1550 | } |
| 1551 | |
Jeremy Fitzhardinge | b96229b | 2009-03-17 13:30:55 -0700 | [diff] [blame] | 1552 | static __init void xen_release_pmd_init(unsigned long pfn) |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1553 | { |
Jeremy Fitzhardinge | b96229b | 2009-03-17 13:30:55 -0700 | [diff] [blame] | 1554 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1555 | } |
| 1556 | |
| 1557 | /* This needs to make sure the new pte page is pinned iff it's being |
| 1558 | attached to a pinned pagetable. */ |
| 1559 | static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level) |
| 1560 | { |
| 1561 | struct page *page = pfn_to_page(pfn); |
| 1562 | |
| 1563 | if (PagePinned(virt_to_page(mm->pgd))) { |
| 1564 | SetPagePinned(page); |
| 1565 | |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1566 | if (!PageHighMem(page)) { |
| 1567 | make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn))); |
| 1568 | if (level == PT_PTE && USE_SPLIT_PTLOCKS) |
| 1569 | pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); |
| 1570 | } else { |
| 1571 | /* make sure there are no stray mappings of |
| 1572 | this page */ |
| 1573 | kmap_flush_unused(); |
| 1574 | } |
| 1575 | } |
| 1576 | } |
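| | |
| | /* |
| | * The USE_SPLIT_PTLOCKS test mirrors xen_pin_page(): pte pages are |
| | * pinned individually only when split pte locks are in use; with a |
| | * single page_table_lock, the pin of the containing pagetable |
| | * covers them. |
| | */ |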
| 1577 | |
| 1578 | static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn) |
| 1579 | { |
| 1580 | xen_alloc_ptpage(mm, pfn, PT_PTE); |
| 1581 | } |
| 1582 | |
| 1583 | static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn) |
| 1584 | { |
| 1585 | xen_alloc_ptpage(mm, pfn, PT_PMD); |
| 1586 | } |
| 1587 | |
| 1588 | /* This should never happen until we're OK to use struct page */ |
| 1589 | static void xen_release_ptpage(unsigned long pfn, unsigned level) |
| 1590 | { |
| 1591 | struct page *page = pfn_to_page(pfn); |
| 1592 | |
| 1593 | if (PagePinned(page)) { |
| 1594 | if (!PageHighMem(page)) { |
| 1595 | if (level == PT_PTE && USE_SPLIT_PTLOCKS) |
| 1596 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); |
| 1597 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
| 1598 | } |
| 1599 | ClearPagePinned(page); |
| 1600 | } |
| 1601 | } |
| 1602 | |
| 1603 | static void xen_release_pte(unsigned long pfn) |
| 1604 | { |
| 1605 | xen_release_ptpage(pfn, PT_PTE); |
| 1606 | } |
| 1607 | |
| 1608 | static void xen_release_pmd(unsigned long pfn) |
| 1609 | { |
| 1610 | xen_release_ptpage(pfn, PT_PMD); |
| 1611 | } |
| 1612 | |
| 1613 | #if PAGETABLE_LEVELS == 4 |
| 1614 | static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) |
| 1615 | { |
| 1616 | xen_alloc_ptpage(mm, pfn, PT_PUD); |
| 1617 | } |
| 1618 | |
| 1619 | static void xen_release_pud(unsigned long pfn) |
| 1620 | { |
| 1621 | xen_release_ptpage(pfn, PT_PUD); |
| 1622 | } |
| 1623 | #endif |
| 1624 | |
| 1625 | void __init xen_reserve_top(void) |
| 1626 | { |
| 1627 | #ifdef CONFIG_X86_32 |
| 1628 | unsigned long top = HYPERVISOR_VIRT_START; |
| 1629 | struct xen_platform_parameters pp; |
| 1630 | |
| 1631 | if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) |
| 1632 | top = pp.virt_start; |
| 1633 | |
| 1634 | reserve_top_address(-top); |
| 1635 | #endif /* CONFIG_X86_32 */ |
| 1636 | } |
| 1637 | |
| 1638 | /* |
| 1639 | * Like __va(), but returns the address in the kernel mapping (which is |
| 1640 | * all we have until the physical memory mapping has been set up). |
| 1641 | */ |
| 1642 | static void *__ka(phys_addr_t paddr) |
| 1643 | { |
| 1644 | #ifdef CONFIG_X86_64 |
| 1645 | return (void *)(paddr + __START_KERNEL_map); |
| 1646 | #else |
| 1647 | return __va(paddr); |
| 1648 | #endif |
| 1649 | } |
| 1650 | |
| 1651 | /* Convert a machine address to physical address */ |
| 1652 | static unsigned long m2p(phys_addr_t maddr) |
| 1653 | { |
| 1654 | phys_addr_t paddr; |
| 1655 | |
| 1656 | maddr &= PTE_PFN_MASK; |
| 1657 | paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT; |
| 1658 | |
| 1659 | return paddr; |
| 1660 | } |
| 1661 | |
| 1662 | /* Convert a machine address to kernel virtual */ |
| 1663 | static void *m2v(phys_addr_t maddr) |
| 1664 | { |
| 1665 | return __ka(m2p(maddr)); |
| 1666 | } |
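| | |
| | /* |
| | * Worked example (hypothetical numbers, 4KiB pages): if the M2P |
| | * table maps mfn 0x1234 to pfn 0x42, then for |
| | * maddr == 0x1234000 plus flag bits: |
| | * |
| | *	m2p(maddr) == 0x42000		flags stripped by PTE_PFN_MASK |
| | *	m2v(maddr) == __ka(0x42000) |
| | * |
| | * The in-page offset is lost, which is fine: these helpers are only |
| | * used on page-aligned pagetable entries. |
| | */ |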
| 1667 | |
Juan Quintela | 4ec5387 | 2010-09-02 15:45:43 +0100 | [diff] [blame] | 1668 | /* Set the page permissions on an identity-mapped page */ |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1669 | static void set_page_prot(void *addr, pgprot_t prot) |
| 1670 | { |
| 1671 | unsigned long pfn = __pa(addr) >> PAGE_SHIFT; |
| 1672 | pte_t pte = pfn_pte(pfn, prot); |
| 1673 | |
| 1674 | if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0)) |
| 1675 | BUG(); |
| 1676 | } |
| 1677 | |
| 1678 | static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) |
| 1679 | { |
| 1680 | unsigned pmdidx, pteidx; |
| 1681 | unsigned ident_pte; |
| 1682 | unsigned long pfn; |
| 1683 | |
Jeremy Fitzhardinge | 764f0138 | 2010-08-26 16:23:51 -0700 | [diff] [blame] | 1684 | level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES, |
| 1685 | PAGE_SIZE); |
| 1686 | |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1687 | ident_pte = 0; |
| 1688 | pfn = 0; |
| 1689 | for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) { |
| 1690 | pte_t *pte_page; |
| 1691 | |
| 1692 | /* Reuse or allocate a page of ptes */ |
| 1693 | if (pmd_present(pmd[pmdidx])) |
| 1694 | pte_page = m2v(pmd[pmdidx].pmd); |
| 1695 | else { |
| 1696 | /* Check for free pte pages */ |
Jeremy Fitzhardinge | 764f0138 | 2010-08-26 16:23:51 -0700 | [diff] [blame] | 1697 | if (ident_pte == LEVEL1_IDENT_ENTRIES) |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1698 | break; |
| 1699 | |
| 1700 | pte_page = &level1_ident_pgt[ident_pte]; |
| 1701 | ident_pte += PTRS_PER_PTE; |
| 1702 | |
| 1703 | pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE); |
| 1704 | } |
| 1705 | |
| 1706 | /* Install mappings */ |
| 1707 | for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { |
| 1708 | pte_t pte; |
| 1709 | |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1710 | if (!pte_none(pte_page[pteidx])) |
| 1711 | continue; |
| 1712 | |
| 1713 | pte = pfn_pte(pfn, PAGE_KERNEL_EXEC); |
| 1714 | pte_page[pteidx] = pte; |
| 1715 | } |
| 1716 | } |
| 1717 | |
| 1718 | for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE) |
| 1719 | set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO); |
| 1720 | |
| 1721 | set_page_prot(pmd, PAGE_KERNEL_RO); |
| 1722 | } |
| 1723 | |
Ian Campbell | 7e77506 | 2010-09-30 12:37:26 +0100 | [diff] [blame] | 1724 | void __init xen_setup_machphys_mapping(void) |
| 1725 | { |
| 1726 | struct xen_machphys_mapping mapping; |
| 1727 | unsigned long machine_to_phys_nr_ents; |
| 1728 | |
| 1729 | if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { |
| 1730 | machine_to_phys_mapping = (unsigned long *)mapping.v_start; |
| 1731 | machine_to_phys_nr_ents = mapping.max_mfn + 1; |
| 1732 | } else { |
| 1733 | machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES; |
| 1734 | } |
| 1735 | machine_to_phys_order = fls(machine_to_phys_nr_ents - 1); |
| 1736 | } |
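| | |
| | /* |
| | * Example of the order computation (hypothetical entry count): if |
| | * machine_to_phys_nr_ents were 0x4000000 (64M entries), then |
| | * fls(0x4000000 - 1) == 26, so machine_to_phys_order == 26 and the |
| | * table covers mfns below 1 << 26. |
| | */ |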
| 1737 | |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1738 | #ifdef CONFIG_X86_64 |
| 1739 | static void convert_pfn_mfn(void *v) |
| 1740 | { |
| 1741 | pte_t *pte = v; |
| 1742 | int i; |
| 1743 | |
| 1744 | /* All levels are converted the same way, so just treat them |
| 1745 | as ptes. */ |
| 1746 | for (i = 0; i < PTRS_PER_PTE; i++) |
| 1747 | pte[i] = xen_make_pte(pte[i].pte); |
| 1748 | } |
| 1749 | |
| 1750 | /* |
Lucas De Marchi | 0d2eb44 | 2011-03-17 16:24:16 -0300 | [diff] [blame] | 1751 | * Set up the initial kernel pagetable. |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1752 | * |
| 1753 | * We can construct this by grafting the Xen provided pagetable into |
| 1754 | * head_64.S's preconstructed pagetables. We copy the Xen L2's into |
| 1755 | * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This |
| 1756 | * means that only the kernel has a physical mapping to start with - |
| 1757 | * but that's enough to get __va working. We need to fill in the rest |
| 1758 | * of the physical mapping once some sort of allocator has been set |
| 1759 | * up. |
| 1760 | */ |
| 1761 | __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, |
| 1762 | unsigned long max_pfn) |
| 1763 | { |
| 1764 | pud_t *l3; |
| 1765 | pmd_t *l2; |
| 1766 | |
Stefano Stabellini | 14988a4d3 | 2011-02-18 11:32:40 +0000 | [diff] [blame] | 1767 | /* max_pfn_mapped is the last pfn mapped in the initial memory |
| 1768 | * mappings. Since on Xen, after the kernel mappings, we also have |
| 1769 | * mappings of some pages that don't exist in pfn space, we set |
| 1770 | * max_pfn_mapped to the last real pfn mapped. */ |
| 1771 | max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); |
| 1772 | |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1773 | /* Zap identity mapping */ |
| 1774 | init_level4_pgt[0] = __pgd(0); |
| 1775 | |
| 1776 | /* Pre-constructed entries are in pfn, so convert to mfn */ |
| 1777 | convert_pfn_mfn(init_level4_pgt); |
| 1778 | convert_pfn_mfn(level3_ident_pgt); |
| 1779 | convert_pfn_mfn(level3_kernel_pgt); |
| 1780 | |
| 1781 | l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); |
| 1782 | l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); |
| 1783 | |
| 1784 | memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); |
| 1785 | memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); |
| 1786 | |
| 1787 | l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd); |
| 1788 | l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud); |
| 1789 | memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD); |
| 1790 | |
| 1791 | /* Set up identity map */ |
| 1792 | xen_map_identity_early(level2_ident_pgt, max_pfn); |
| 1793 | |
| 1794 | /* Make pagetable pieces RO */ |
| 1795 | set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); |
| 1796 | set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); |
| 1797 | set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); |
| 1798 | set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); |
| 1799 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); |
| 1800 | set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); |
| 1801 | |
| 1802 | /* Pin down new L4 */ |
| 1803 | pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, |
| 1804 | PFN_DOWN(__pa_symbol(init_level4_pgt))); |
| 1805 | |
| 1806 | /* Unpin Xen-provided one */ |
| 1807 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); |
| 1808 | |
| 1809 | /* Switch over */ |
| 1810 | pgd = init_level4_pgt; |
| 1811 | |
| 1812 | /* |
| 1813 | * At this stage there can be no user pgd, and no page |
| 1814 | * structure to attach it to, so make sure we just set the kernel |
| 1815 | * pgd. |
| 1816 | */ |
| 1817 | xen_mc_batch(); |
| 1818 | __xen_write_cr3(true, __pa(pgd)); |
| 1819 | xen_mc_issue(PARAVIRT_LAZY_CPU); |
| 1820 | |
Yinghai Lu | a9ce6bc | 2010-08-25 13:39:17 -0700 | [diff] [blame] | 1821 | memblock_x86_reserve_range(__pa(xen_start_info->pt_base), |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1822 | __pa(xen_start_info->pt_base + |
| 1823 | xen_start_info->nr_pt_frames * PAGE_SIZE), |
| 1824 | "XEN PAGETABLES"); |
| 1825 | |
| 1826 | return pgd; |
| 1827 | } |
| 1828 | #else /* !CONFIG_X86_64 */ |
Ian Campbell | 5b5c1af | 2010-11-24 12:09:41 +0000 | [diff] [blame] | 1829 | static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); |
| 1830 | static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); |
| 1831 | |
| 1832 | static __init void xen_write_cr3_init(unsigned long cr3) |
| 1833 | { |
| 1834 | unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); |
| 1835 | |
| 1836 | BUG_ON(read_cr3() != __pa(initial_page_table)); |
| 1837 | BUG_ON(cr3 != __pa(swapper_pg_dir)); |
| 1838 | |
| 1839 | /* |
| 1840 | * We are switching to swapper_pg_dir for the first time (from |
| 1841 | * initial_page_table) and therefore need to mark that page |
| 1842 | * read-only and then pin it. |
| 1843 | * |
| 1844 | * Xen disallows sharing of kernel PMDs for PAE |
| 1845 | * guests. Therefore we must copy the kernel PMD from |
| 1846 | * initial_page_table into a new kernel PMD to be used in |
| 1847 | * swapper_pg_dir. |
| 1848 | */ |
| 1849 | swapper_kernel_pmd = |
| 1850 | extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); |
| 1851 | memcpy(swapper_kernel_pmd, initial_kernel_pmd, |
| 1852 | sizeof(pmd_t) * PTRS_PER_PMD); |
| 1853 | swapper_pg_dir[KERNEL_PGD_BOUNDARY] = |
| 1854 | __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT); |
| 1855 | set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO); |
| 1856 | |
| 1857 | set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); |
| 1858 | xen_write_cr3(cr3); |
| 1859 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn); |
| 1860 | |
| 1861 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, |
| 1862 | PFN_DOWN(__pa(initial_page_table))); |
| 1863 | set_page_prot(initial_page_table, PAGE_KERNEL); |
| 1864 | set_page_prot(initial_kernel_pmd, PAGE_KERNEL); |
| 1865 | |
| 1866 | pv_mmu_ops.write_cr3 = &xen_write_cr3; |
| 1867 | } |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1868 | |
| 1869 | __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, |
| 1870 | unsigned long max_pfn) |
| 1871 | { |
| 1872 | pmd_t *kernel_pmd; |
| 1873 | |
Ian Campbell | 5b5c1af | 2010-11-24 12:09:41 +0000 | [diff] [blame] | 1874 | initial_kernel_pmd = |
| 1875 | extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); |
Jeremy Fitzhardinge | f099180 | 2010-08-26 16:16:28 -0700 | [diff] [blame] | 1876 | |
Stefano Stabellini | 14988a4d3 | 2011-02-18 11:32:40 +0000 | [diff] [blame] | 1877 | max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1878 | |
| 1879 | kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); |
Ian Campbell | 5b5c1af | 2010-11-24 12:09:41 +0000 | [diff] [blame] | 1880 | memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1881 | |
Ian Campbell | 5b5c1af | 2010-11-24 12:09:41 +0000 | [diff] [blame] | 1882 | xen_map_identity_early(initial_kernel_pmd, max_pfn); |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1883 | |
Ian Campbell | 5b5c1af | 2010-11-24 12:09:41 +0000 | [diff] [blame] | 1884 | memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD); |
| 1885 | initial_page_table[KERNEL_PGD_BOUNDARY] = |
| 1886 | __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT); |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1887 | |
Ian Campbell | 5b5c1af | 2010-11-24 12:09:41 +0000 | [diff] [blame] | 1888 | set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO); |
| 1889 | set_page_prot(initial_page_table, PAGE_KERNEL_RO); |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1890 | set_page_prot(empty_zero_page, PAGE_KERNEL_RO); |
| 1891 | |
| 1892 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); |
| 1893 | |
Ian Campbell | 5b5c1af | 2010-11-24 12:09:41 +0000 | [diff] [blame] | 1894 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, |
| 1895 | PFN_DOWN(__pa(initial_page_table))); |
| 1896 | xen_write_cr3(__pa(initial_page_table)); |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1897 | |
Yinghai Lu | a9ce6bc | 2010-08-25 13:39:17 -0700 | [diff] [blame] | 1898 | memblock_x86_reserve_range(__pa(xen_start_info->pt_base), |
Jeremy Fitzhardinge | 33df4db | 2009-05-07 11:56:44 -0700 | [diff] [blame] | 1899 | __pa(xen_start_info->pt_base + |
| 1900 | xen_start_info->nr_pt_frames * PAGE_SIZE), |
| 1901 | "XEN PAGETABLES"); |
| 1902 | |
Ian Campbell | 5b5c1af | 2010-11-24 12:09:41 +0000 | [diff] [blame] | 1903 | return initial_page_table; |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1904 | } |
| 1905 | #endif /* CONFIG_X86_64 */ |
| 1906 | |
Jeremy Fitzhardinge | 98511f3 | 2010-09-03 14:55:16 +0100 | [diff] [blame] | 1907 | static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; |
| 1908 | |
Masami Hiramatsu | 3b3809a | 2009-04-09 10:55:33 -0700 | [diff] [blame] | 1909 | static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1910 | { |
| 1911 | pte_t pte; |
| 1912 | |
| 1913 | phys >>= PAGE_SHIFT; |
| 1914 | |
| 1915 | switch (idx) { |
| 1916 | case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: |
| 1917 | #ifdef CONFIG_X86_F00F_BUG |
| 1918 | case FIX_F00F_IDT: |
| 1919 | #endif |
| 1920 | #ifdef CONFIG_X86_32 |
| 1921 | case FIX_WP_TEST: |
| 1922 | case FIX_VDSO: |
| 1923 | # ifdef CONFIG_HIGHMEM |
| 1924 | case FIX_KMAP_BEGIN ... FIX_KMAP_END: |
| 1925 | # endif |
| 1926 | #else |
| 1927 | case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE: |
| 1928 | #endif |
Jeremy Fitzhardinge | 3ecb1b7 | 2009-03-07 23:48:41 -0800 | [diff] [blame] | 1929 | case FIX_TEXT_POKE0: |
| 1930 | case FIX_TEXT_POKE1: |
| 1931 | /* All local page mappings */ |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1932 | pte = pfn_pte(phys, prot); |
| 1933 | break; |
| 1934 | |
Jeremy Fitzhardinge | 98511f3 | 2010-09-03 14:55:16 +0100 | [diff] [blame] | 1935 | #ifdef CONFIG_X86_LOCAL_APIC |
| 1936 | case FIX_APIC_BASE: /* maps dummy local APIC */ |
| 1937 | pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); |
| 1938 | break; |
| 1939 | #endif |
| 1940 | |
| 1941 | #ifdef CONFIG_X86_IO_APIC |
| 1942 | case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END: |
| 1943 | /* |
| 1944 | * We just don't map the IO APIC - all access is via |
| 1945 | * hypercalls. Keep the address in the pte for reference. |
| 1946 | */ |
| 1947 | pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); |
| 1948 | break; |
| 1949 | #endif |
| 1950 | |
Jeremy Fitzhardinge | c0011db | 2010-02-04 14:46:34 -0800 | [diff] [blame] | 1951 | case FIX_PARAVIRT_BOOTMAP: |
| 1952 | /* This is an MFN, but it isn't an IO mapping from the |
| 1953 | IO domain */ |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1954 | pte = mfn_pte(phys, prot); |
| 1955 | break; |
Jeremy Fitzhardinge | c0011db | 2010-02-04 14:46:34 -0800 | [diff] [blame] | 1956 | |
| 1957 | default: |
| 1958 | /* By default, set_fixmap is used for hardware mappings */ |
| 1959 | pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP)); |
| 1960 | break; |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1961 | } |
| 1962 | |
| 1963 | __native_set_fixmap(idx, pte); |
| 1964 | |
| 1965 | #ifdef CONFIG_X86_64 |
| 1966 | /* Replicate changes to map the vsyscall page into the user |
| 1967 | pagetable vsyscall mapping. */ |
| 1968 | if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) { |
| 1969 | unsigned long vaddr = __fix_to_virt(idx); |
| 1970 | set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); |
| 1971 | } |
| 1972 | #endif |
| 1973 | } |
| 1974 | |
Juan Quintela | 4ec5387 | 2010-09-02 15:45:43 +0100 | [diff] [blame] | 1975 | __init void xen_ident_map_ISA(void) |
| 1976 | { |
| 1977 | unsigned long pa; |
| 1978 | |
| 1979 | /* |
| 1980 | * If we're dom0, then linear map the ISA machine addresses into |
| 1981 | * the kernel's address space. |
| 1982 | */ |
| 1983 | if (!xen_initial_domain()) |
| 1984 | return; |
| 1985 | |
| 1986 | xen_raw_printk("Xen: setup ISA identity maps\n"); |
| 1987 | |
| 1988 | for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) { |
| 1989 | pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO); |
| 1990 | |
| 1991 | if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0)) |
| 1992 | BUG(); |
| 1993 | } |
| 1994 | |
| 1995 | xen_flush_tlb(); |
| 1996 | } |
| 1997 | |
Thomas Gleixner | f1d7062 | 2009-08-20 13:13:52 +0200 | [diff] [blame] | 1998 | static __init void xen_post_allocator_init(void) |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 1999 | { |
Konrad Rzeszutek Wilk | fc25151 | 2010-12-23 16:25:29 -0500 | [diff] [blame] | 2000 | #ifdef CONFIG_XEN_DEBUG |
| 2001 | pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); |
| 2002 | #endif |
Jeremy Fitzhardinge | 319f3ba | 2009-01-28 14:35:01 -0800 | [diff] [blame] | 2003 | pv_mmu_ops.set_pte = xen_set_pte; |
| 2004 | pv_mmu_ops.set_pmd = xen_set_pmd; |
| 2005 | pv_mmu_ops.set_pud = xen_set_pud; |
| 2006 | #if PAGETABLE_LEVELS == 4 |
| 2007 | pv_mmu_ops.set_pgd = xen_set_pgd; |
| 2008 | #endif |
| 2009 | |
| 2010 | /* This will work as long as patching hasn't happened yet |
| 2011 | (which it hasn't) */ |
| 2012 | pv_mmu_ops.alloc_pte = xen_alloc_pte; |
| 2013 | pv_mmu_ops.alloc_pmd = xen_alloc_pmd; |
| 2014 | pv_mmu_ops.release_pte = xen_release_pte; |
| 2015 | pv_mmu_ops.release_pmd = xen_release_pmd; |
| 2016 | #if PAGETABLE_LEVELS == 4 |
| 2017 | pv_mmu_ops.alloc_pud = xen_alloc_pud; |
| 2018 | pv_mmu_ops.release_pud = xen_release_pud; |
| 2019 | #endif |
| 2020 | |
| 2021 | #ifdef CONFIG_X86_64 |
| 2022 | SetPagePinned(virt_to_page(level3_user_vsyscall)); |
| 2023 | #endif |
| 2024 | xen_mark_init_mm_pinned(); |
| 2025 | } |
| 2026 | |
Jeremy Fitzhardinge | b407fc5 | 2009-02-17 23:46:21 -0800 | [diff] [blame] | 2027 | static void xen_leave_lazy_mmu(void) |
| 2028 | { |
Jeremy Fitzhardinge | 5caecb9 | 2009-02-20 23:01:26 -0800 | [diff] [blame] | 2029 | preempt_disable(); |
	xen_mc_flush();
	paravirt_leave_lazy_mmu();
	preempt_enable();
}

static const struct pv_mmu_ops xen_mmu_ops __initdata = {
	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
#ifdef CONFIG_X86_32
	.write_cr3 = xen_write_cr3_init,
#else
	.write_cr3 = xen_write_cr3,
#endif

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	.pgd_alloc = xen_pgd_alloc,
	.pgd_free = xen_pgd_free,

	.alloc_pte = xen_alloc_pte_init,
	.release_pte = xen_release_pte_init,
	.alloc_pmd = xen_alloc_pmd_init,
	.release_pmd = xen_release_pmd_init,

	.set_pte = xen_set_pte_init,
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd_hyper,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),

	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,
#endif	/* CONFIG_X86_PAE */
	.set_pud = xen_set_pud_hyper,

	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),

#if PAGETABLE_LEVELS == 4
	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
	.set_pgd = xen_set_pgd_hyper,

	.alloc_pud = xen_alloc_pmd_init,
	.release_pud = xen_release_pmd_init,
#endif	/* PAGETABLE_LEVELS == 4 */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy_mmu,
	},

	.set_fixmap = xen_set_fixmap,
};

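/*
 * For orientation only: once pv_mmu_ops is replaced below, the generic
 * pagetable accessors dispatch through this table.  Schematically (a
 * sketch of the paravirt glue, not code from this file), set_pte()
 * reduces to something like
 *
 *	static inline void set_pte(pte_t *ptep, pte_t pte)
 *	{
 *		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pte.pte);
 *	}
 *
 * so every pagetable write in the kernel ends up in the Xen-aware
 * implementations registered above.
 */
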
void __init xen_init_mmu_ops(void)
{
	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
	pv_mmu_ops = xen_mmu_ops;

	memset(dummy_mapping, 0xff, PAGE_SIZE);
}

/* Protected by xen_reservation_lock. */
#define MAX_CONTIG_ORDER 9 /* 2MB */
static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
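/*
 * Sizing check: with 4K pages, MAX_CONTIG_ORDER == 9 covers
 * 1 << 9 == 512 frames, i.e. one 2MB extent, so discontig_frames
 * costs 512 * sizeof(unsigned long) bytes of static storage
 * (4KB on 64-bit).
 */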

#define VOID_PTE (mfn_pte(0, __pgprot(0)))
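/*
 * Unmap the 1<<order pages starting at vaddr in a single multicall
 * batch, optionally recording the MFNs that backed them (in_frames)
 * and/or the PFNs they covered (out_frames), and marking the affected
 * p2m entries invalid.  Illustrative contract for a single page:
 *
 *	unsigned long old_mfn;
 *	xen_zap_pfn_range(vaddr, 0, &old_mfn, NULL);
 *
 * leaves vaddr unmapped, with old_mfn holding the machine frame that
 * used to back it, ready to be handed to xen_exchange_memory().
 */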
static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
			      unsigned long *in_frames,
			      unsigned long *out_frames)
{
	int i;
	struct multicall_space mcs;

	xen_mc_batch();
	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
		mcs = __xen_mc_entry(0);

		if (in_frames)
			in_frames[i] = virt_to_mfn(vaddr);

		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
		__set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);

		if (out_frames)
			out_frames[i] = virt_to_pfn(vaddr);
	}
	xen_mc_issue(0);
}

/*
 * Update the pfn-to-mfn mappings for a virtual address range, either to
 * point to an array of mfns, or contiguously from a single starting
 * mfn.
 */
static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
				     unsigned long *mfns,
				     unsigned long first_mfn)
{
	unsigned i, limit;
	unsigned long mfn;

	xen_mc_batch();

	limit = 1u << order;
	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
		struct multicall_space mcs;
		unsigned flags;

		mcs = __xen_mc_entry(0);
		if (mfns)
			mfn = mfns[i];
		else
			mfn = first_mfn + i;

		if (i < (limit - 1))
			flags = 0;
		else {
			if (order == 0)
				flags = UVMF_INVLPG | UVMF_ALL;
			else
				flags = UVMF_TLB_FLUSH | UVMF_ALL;
		}

		MULTI_update_va_mapping(mcs.mc, vaddr,
					mfn_pte(mfn, PAGE_KERNEL), flags);

		set_phys_to_machine(virt_to_pfn(vaddr), mfn);
	}

	xen_mc_issue(0);
}

/*
 * Perform the hypercall to exchange a region of our pfns to point to
 * memory with the required contiguous alignment.  Takes the pfns as
 * input, and populates mfns as output.
 *
 * Returns a success code indicating whether the hypervisor was able to
 * satisfy the request or not.
 */
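/*
 * Worked example (the shape used by xen_create_contiguous_region
 * below): trading 512 scattered 4K frames for one 2MB extent is
 *
 *	xen_exchange_memory(512, 0, pfns, 1, 9, &mfn, address_bits);
 *
 * which satisfies the invariant checked below,
 * extents_in << order_in == extents_out << order_out
 * (512 << 0 == 1 << 9 == 512 pages either way).
 */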
static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
			       unsigned long *pfns_in,
			       unsigned long extents_out,
			       unsigned int order_out,
			       unsigned long *mfns_out,
			       unsigned int address_bits)
{
	long rc;
	int success;

	struct xen_memory_exchange exchange = {
		.in = {
			.nr_extents = extents_in,
			.extent_order = order_in,
			.extent_start = pfns_in,
			.domid = DOMID_SELF
		},
		.out = {
			.nr_extents = extents_out,
			.extent_order = order_out,
			.extent_start = mfns_out,
			.address_bits = address_bits,
			.domid = DOMID_SELF
		}
	};

	BUG_ON(extents_in << order_in != extents_out << order_out);

	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
	success = (exchange.nr_exchanged == extents_in);

	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
	BUG_ON(success && (rc != 0));

	return success;
}

int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
				 unsigned int address_bits)
{
	unsigned long *in_frames = discontig_frames, out_frame;
	unsigned long flags;
	int success;

	/*
	 * Currently an auto-translated guest will not perform I/O, nor will
	 * it require PAE page directories below 4GB.  Therefore any calls to
	 * this function are redundant and can be ignored.
	 */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return -ENOMEM;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Zap current PTEs, remembering MFNs. */
	xen_zap_pfn_range(vstart, order, in_frames, NULL);

	/* 2. Get a new contiguous memory extent. */
	out_frame = virt_to_pfn(vstart);
	success = xen_exchange_memory(1UL << order, 0, in_frames,
				      1, order, &out_frame,
				      address_bits);

	/* 3. Map the new extent in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
	else
		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);

	return success ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
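
/*
 * Hypothetical caller, for illustration only: a driver needing a
 * machine-contiguous buffer below 4GB for a 32-bit DMA device could do
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 4);
 *
 *	if (xen_create_contiguous_region(buf, 4, 32))
 *		goto fail;
 *
 * (a non-zero return means the hypervisor could not satisfy the
 * exchange), and later release the constraint again with
 * xen_destroy_contiguous_region(buf, 4).
 */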

void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
{
	unsigned long *out_frames = discontig_frames, in_frame;
	unsigned long flags;
	int success;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	if (unlikely(order > MAX_CONTIG_ORDER))
		return;

	memset((void *) vstart, 0, PAGE_SIZE << order);

	spin_lock_irqsave(&xen_reservation_lock, flags);

	/* 1. Find start MFN of contiguous extent. */
	in_frame = virt_to_mfn(vstart);

	/* 2. Zap current PTEs. */
	xen_zap_pfn_range(vstart, order, NULL, out_frames);

	/* 3. Do the exchange for non-contiguous MFNs. */
	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
				      0, out_frames, 0);

	/* 4. Map new pages in place of old pages. */
	if (success)
		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
	else
		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);

	spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

#ifdef CONFIG_XEN_PVHVM
static void xen_hvm_exit_mmap(struct mm_struct *mm)
{
	struct xen_hvm_pagetable_dying a;
	int rc;

	a.domid = DOMID_SELF;
	a.gpa = __pa(mm->pgd);
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	WARN_ON_ONCE(rc < 0);
}

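/*
 * Probe for HVMOP_pagetable_dying by issuing it against gpa 0: a
 * hypervisor without support fails the hypercall, in which case the
 * default exit_mmap hook is left in place.
 */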
static int is_pagetable_dying_supported(void)
{
	struct xen_hvm_pagetable_dying a;
	int rc = 0;

	a.domid = DOMID_SELF;
	a.gpa = 0x00;
	rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a);
	if (rc < 0) {
		printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n");
		return 0;
	}
	return 1;
}

void __init xen_hvm_init_mmu_ops(void)
{
	if (is_pagetable_dying_supported())
		pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap;
}
#endif

#define REMAP_BATCH_SIZE 16

struct remap_data {
	unsigned long mfn;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};

static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));

	rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}

int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       unsigned long mfn, int nr,
			       pgprot_t prot, unsigned domid)
{
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	int batch;
	unsigned long range;
	int err = 0;

	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
		 (VM_PFNMAP | VM_RESERVED | VM_IO)));

	rmd.mfn = mfn;
	rmd.prot = prot;

	while (nr) {
		batch = min(REMAP_BATCH_SIZE, nr);
		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_mfn_pte_fn, &rmd);
		if (err)
			goto out;

		err = -EFAULT;
		if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
			goto out;

		nr -= batch;
		addr += range;
	}

	err = 0;
out:

	flush_tlb_all();

	return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
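
/*
 * Illustrative use (hypothetical values): mapping nr foreign frames
 * starting at mfn from domain domid into a userspace vma would be
 *
 *	err = xen_remap_domain_mfn_range(vma, vma->vm_start, mfn, nr,
 *					 vma->vm_page_prot, domid);
 *
 * where the caller has already marked the vma
 * VM_PFNMAP | VM_RESERVED | VM_IO, as the BUG_ON() above insists.
 */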

#ifdef CONFIG_XEN_DEBUG_FS

static int p2m_dump_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, p2m_dump_show, NULL);
}

static const struct file_operations p2m_dump_fops = {
	.open = p2m_dump_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct dentry *d_mmu_debug;

static int __init xen_mmu_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);

	debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
	debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_pinned);
	debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pgd_update_batched);

	debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
	debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_pinned);
	debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pud_update_batched);

	debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
	debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_pinned);
	debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pmd_update_batched);

	debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
	/*
	debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
			   &mmu_stats.pte_update_pinned);
	*/
	debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
			   &mmu_stats.pte_update_batched);

	debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
	debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
			   &mmu_stats.mmu_update_extended);
	xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
				     mmu_stats.mmu_update_histo, 20);

	debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
	debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_batched);
	debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_current);
	debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
			   &mmu_stats.set_pte_at_kernel);

	debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
	debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
			   &mmu_stats.prot_commit_batched);

	debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
	return 0;
}
fs_initcall(xen_mmu_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */