/*P:700 The pagetable code, on the other hand, still shows the scars of
 * previous encounters.  It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here to point the hardware to the
 * actual Guest pages when running the Guest. :*/

/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include "lg.h"

/*M:008 We hold a reference to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root. :*/

/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest.  If you're not entirely
 * comfortable with virtual addresses, physical addresses and page tables then
 * I recommend you review lguest.c's "Page Table Handling" (with diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables.  Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's.  (See what I mean about weird naming?  Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code.  There are seven
 * parts to this:
 *  (i) Setting up a page table entry for the Guest when it faults,
 *  (ii) Setting up the page table entry for the Guest stack,
 *  (iii) Setting up a page table entry when the Guest tells us it has changed,
 *  (iv) Switching page tables,
 *  (v) Flushing (throwing away) page tables,
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
 :*/

/* Pages are 4k long, and each page table entry is 4 bytes long, giving us 1024
 * (or 2^10) entries per page. */
#define PTES_PER_PAGE_SHIFT 10
#define PTES_PER_PAGE (1 << PTES_PER_PAGE_SHIFT)

/* 1024 entries in a page table page map 1024 pages: 4MB.  The Switcher is
 * conveniently placed at the top 4MB, so it uses a separate, complete PTE
 * page. */
#define SWITCHER_PGD_INDEX (PTES_PER_PAGE - 1)

/* We actually need a separate PTE page for each CPU.  Remember that after the
 * Switcher code itself come two pages for each CPU, and we don't want this
 * CPU's guest to see the pages of any other CPU. */
static DEFINE_PER_CPU(spte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)

/*H:320 With our shadow and Guest types established, we need to deal with
 * them: the page table code is curly enough to need helper functions to keep
 * it clear and clean.
 *
 * The first helper takes a virtual address, and says which entry in the top
 * level page table deals with that address.  Since each top level entry deals
 * with 4M, this effectively divides by 4M. */
static unsigned vaddr_to_pgd_index(unsigned long vaddr)
{
	return vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
}
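
/* A quick worked example (editor's sketch; the numbers follow directly from
 * the definitions above).  With 4k pages (PAGE_SHIFT == 12) and
 * PTES_PER_PAGE_SHIFT == 10, the top 10 bits of an address pick the PGD
 * entry:
 *
 *	vaddr_to_pgd_index(0xC0000000) == 0xC0000000 >> 22 == 768
 *	vaddr_to_pgd_index(0xFFC00000) == 1023 == SWITCHER_PGD_INDEX
 *
 * so a Guest with the usual 3G/1G split keeps its kernel mappings from entry
 * 768 upwards, and the Switcher owns the very last entry. */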

/* There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry for that address.  Since we keep track of several page
 * tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one). */
static spgd_t *spgd_addr(struct lguest *lg, u32 i, unsigned long vaddr)
{
	unsigned int index = vaddr_to_pgd_index(vaddr);

	/* We kill any Guest trying to touch the Switcher addresses. */
	if (index >= SWITCHER_PGD_INDEX) {
		kill_guest(lg, "attempt to access switcher pages");
		index = 0;
	}
	/* Return a pointer to the index'th pgd entry for the i'th page table. */
	return &lg->pgdirs[i].pgdir[index];
}

/* This routine then takes the PGD entry given above, which contains the
 * address of the PTE page.  It then returns a pointer to the PTE entry for the
 * given address. */
static spte_t *spte_addr(struct lguest *lg, spgd_t spgd, unsigned long vaddr)
{
	spte_t *page = __va(spgd.pfn << PAGE_SHIFT);
	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(spgd.flags & _PAGE_PRESENT));
	return &page[(vaddr >> PAGE_SHIFT) % PTES_PER_PAGE];
}
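
/* Putting those two helpers together, a full walk of the shadow tables looks
 * roughly like this (editor's sketch of the pattern, not a helper this file
 * defines; error handling elided):
 *
 *	spgd_t *spgd = spgd_addr(lg, lg->pgdidx, vaddr);
 *	if (spgd->flags & _PAGE_PRESENT) {
 *		spte_t *spte = spte_addr(lg, *spgd, vaddr);
 *		... examine or update *spte ...
 *	}
 *
 * demand_page() and page_writable() below both follow exactly this shape. */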

/* These two functions are just like the above two, except they access the
 * Guest page tables.  Hence they return a Guest address. */
static unsigned long gpgd_addr(struct lguest *lg, unsigned long vaddr)
{
	unsigned int index = vaddr >> (PAGE_SHIFT + PTES_PER_PAGE_SHIFT);
	return lg->pgdirs[lg->pgdidx].cr3 + index * sizeof(gpgd_t);
}

static unsigned long gpte_addr(struct lguest *lg,
			       gpgd_t gpgd, unsigned long vaddr)
{
	unsigned long gpage = gpgd.pfn << PAGE_SHIFT;
	BUG_ON(!(gpgd.flags & _PAGE_PRESENT));
	return gpage + ((vaddr>>PAGE_SHIFT) % PTES_PER_PAGE) * sizeof(gpte_t);
}

/*H:350 This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number.  It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to
 * put that back. */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
	struct page *page;
	/* This value indicates failure. */
	unsigned long ret = -1UL;

	/* get_user_pages() is a complex interface: it gets the "struct
	 * vm_area_struct" and "struct page" associated with a range of pages.
	 * It also needs the task's mmap_sem held, and is not very quick.
	 * It returns the number of pages it got. */
	down_read(&current->mm->mmap_sem);
	if (get_user_pages(current, current->mm, virtpfn << PAGE_SHIFT,
			   1, write, 1, &page, NULL) == 1)
		ret = page_to_pfn(page);
	up_read(&current->mm->mmap_sem);
	return ret;
}

/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky.  The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number. */
static spte_t gpte_to_spte(struct lguest *lg, gpte_t gpte, int write)
{
	spte_t spte;
	unsigned long pfn, base;

	/* The Guest sets the global flag, because it thinks that it is using
	 * PGE.  We only told it to use PGE so it would tell us whether it was
	 * flushing a kernel mapping or a userspace mapping.  We don't actually
	 * use the global bit, so throw it away. */
	spte.flags = (gpte.flags & ~_PAGE_GLOBAL);

	/* The Guest's pages are offset inside the Launcher. */
	base = (unsigned long)lg->mem_base / PAGE_SIZE;

	/* We need a temporary "unsigned long" variable to hold the answer from
	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
	 * fit in spte.pfn.  get_pfn() finds the real physical number of the
	 * page, given the virtual number. */
	pfn = get_pfn(base + gpte.pfn, write);
	if (pfn == -1UL) {
		kill_guest(lg, "failed to get page %u", gpte.pfn);
		/* When we destroy the Guest, we'll go through the shadow page
		 * tables and release_pte() them.  Make sure we don't think
		 * this one is valid! */
		spte.flags = 0;
	}
	/* Now we assign the page number, and our shadow PTE is complete. */
	spte.pfn = pfn;
	return spte;
}
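
/* A concrete example (editor's sketch, with a made-up page number): if the
 * Guest hands us a PTE for virtual page 0x1234 with
 * _PAGE_PRESENT|_PAGE_RW|_PAGE_GLOBAL set, the shadow PTE keeps
 * _PAGE_PRESENT|_PAGE_RW, drops _PAGE_GLOBAL, and swaps 0x1234 for whatever
 * physical page get_pfn() pinned for Launcher page base + 0x1234. */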

/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(spte_t pte)
{
	/* Remember that get_user_pages() took a reference to the page, in
	 * get_pfn()?  We have to put it back now. */
	if (pte.flags & _PAGE_PRESENT)
		put_page(pfn_to_page(pte.pfn));
}
/*:*/
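
/* So the lifetime rule is simple: every present shadow PTE holds exactly one
 * page reference, taken by get_pfn() and dropped by release_pte().  In sketch
 * form (editor's illustration, error handling elided):
 *
 *	spte_t spte = gpte_to_spte(lg, gpte, 1);	(takes the reference)
 *	...
 *	release_pte(spte);				(puts it back)
 *
 * Anything which overwrites a shadow PTE must release the old one first, as
 * demand_page() and do_set_pte() below are careful to do. */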

static void check_gpte(struct lguest *lg, gpte_t gpte)
{
	if ((gpte.flags & (_PAGE_PWT|_PAGE_PSE)) || gpte.pfn >= lg->pfn_limit)
		kill_guest(lg, "bad page table entry");
}

static void check_gpgd(struct lguest *lg, gpgd_t gpgd)
{
	if ((gpgd.flags & ~_PAGE_TABLE) || gpgd.pfn >= lg->pfn_limit)
		kill_guest(lg, "bad page directory entry");
}

/*H:330
 * (i) Setting up a page table entry for the Guest when it faults
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here.  That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true. */
int demand_page(struct lguest *lg, unsigned long vaddr, int errcode)
{
	gpgd_t gpgd;
	spgd_t *spgd;
	unsigned long gpte_ptr;
	gpte_t gpte;
	spte_t *spte;

	/* First step: get the top-level Guest page table entry. */
	gpgd = mkgpgd(lgread_u32(lg, gpgd_addr(lg, vaddr)));
	/* Toplevel not present?  We can't map it in. */
	if (!(gpgd.flags & _PAGE_PRESENT))
		return 0;

	/* Now look at the matching shadow entry. */
	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
	if (!(spgd->flags & _PAGE_PRESENT)) {
		/* No shadow entry: allocate a new shadow PTE page. */
		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
		/* This is not really the Guest's fault, but killing it is
		 * simple for this corner case. */
		if (!ptepage) {
			kill_guest(lg, "out of memory allocating pte page");
			return 0;
		}
		/* We check that the Guest pgd is OK. */
		check_gpgd(lg, gpgd);
		/* And we copy the flags to the shadow PGD entry.  The page
		 * number in the shadow PGD is the page we just allocated. */
		spgd->raw.val = (__pa(ptepage) | gpgd.flags);
	}

	/* OK, now we look at the lower level in the Guest page table: keep its
	 * address, because we might update it later. */
	gpte_ptr = gpte_addr(lg, gpgd, vaddr);
	gpte = mkgpte(lgread_u32(lg, gpte_ptr));

	/* If this page isn't in the Guest page tables, we can't page it in. */
	if (!(gpte.flags & _PAGE_PRESENT))
		return 0;

	/* Check they're not trying to write to a page the Guest wants
	 * read-only (bit 2 of errcode == write). */
	if ((errcode & 2) && !(gpte.flags & _PAGE_RW))
		return 0;

	/* User access to a kernel page? (bit 3 == user access) */
	if ((errcode & 4) && !(gpte.flags & _PAGE_USER))
		return 0;

	/* Check that the Guest PTE flags are OK, and the page number is below
	 * the pfn_limit (ie. not mapping the Launcher binary). */
	check_gpte(lg, gpte);
	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
	gpte.flags |= _PAGE_ACCESSED;
	if (errcode & 2)
		gpte.flags |= _PAGE_DIRTY;

	/* Get the pointer to the shadow PTE entry we're going to set. */
	spte = spte_addr(lg, *spgd, vaddr);
	/* If there was a valid shadow PTE entry here before, we release it.
	 * This can happen with a write to a previously read-only entry. */
	release_pte(*spte);

	/* If this is a write, we insist that the Guest page is writable (the
	 * final arg to gpte_to_spte()). */
	if (gpte.flags & _PAGE_DIRTY)
		*spte = gpte_to_spte(lg, gpte, 1);
	else {
		/* If this is a read, don't set the "writable" bit in the page
		 * table entry, even if the Guest says it's writable.  That way
		 * we come back here when a write does actually occur, so we
		 * can update the Guest's _PAGE_DIRTY flag. */
		gpte_t ro_gpte = gpte;
		ro_gpte.flags &= ~_PAGE_RW;
		*spte = gpte_to_spte(lg, ro_gpte, 0);
	}

	/* Finally, we write the Guest PTE entry back: we've set the
	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
	lgwrite_u32(lg, gpte_ptr, gpte.raw.val);

	/* We succeeded in mapping the page! */
	return 1;
}
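
/* For reference, the caller in run_guest() does roughly this (editor's
 * paraphrase of core.c, not code from this file): on a Guest page fault the
 * CPU leaves the faulting address in cr2, and we try the quiet fix-up first:
 *
 *	if (demand_page(lg, cr2, errcode))
 *		continue;	(back into the Guest, none the wiser)
 *
 * Only when demand_page() returns false does the fault get reflected into
 * the Guest's own page fault handler. */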

/*H:360 (ii) Setting up the page table entry for the Guest stack.
 *
 * Remember pin_stack_pages() which makes sure the stack is mapped?  It could
 * simply call demand_page(), but as we've seen that logic is quite long, and
 * usually the stack pages are already mapped anyway, so it's not required.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable? */
static int page_writable(struct lguest *lg, unsigned long vaddr)
{
	spgd_t *spgd;
	unsigned long flags;

	/* Look at the top level entry: is it present? */
	spgd = spgd_addr(lg, lg->pgdidx, vaddr);
	if (!(spgd->flags & _PAGE_PRESENT))
		return 0;

	/* Check the flags on the pte entry itself: it must be present and
	 * writable. */
	flags = spte_addr(lg, *spgd, vaddr)->flags;
	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}

/* So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write"). */
void pin_page(struct lguest *lg, unsigned long vaddr)
{
	if (!page_writable(lg, vaddr) && !demand_page(lg, vaddr, 2))
		kill_guest(lg, "bad stack page %#lx", vaddr);
}

/*H:450 If we chase down the release_pgd() code, it looks like this: */
static void release_pgd(struct lguest *lg, spgd_t *spgd)
{
	/* If the entry's not present, there's nothing to release. */
	if (spgd->flags & _PAGE_PRESENT) {
		unsigned int i;
		/* Converting the pfn to find the actual PTE page is easy: turn
		 * the page number into a physical address, then convert to a
		 * virtual address (easy for kernel pages like this one). */
		spte_t *ptepage = __va(spgd->pfn << PAGE_SHIFT);
		/* For each entry in the page, we might need to release it. */
		for (i = 0; i < PTES_PER_PAGE; i++)
			release_pte(ptepage[i]);
		/* Now we can free the page of PTEs */
		free_page((long)ptepage);
		/* And zero out the PGD entry so we never release it twice. */
		spgd->raw.val = 0;
	}
}

/*H:440 (v) Flushing (throwing away) page tables,
 *
 * We saw flush_user_mappings() called when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the kernel address. */
static void flush_user_mappings(struct lguest *lg, int idx)
{
	unsigned int i;
	/* Release every pgd entry up to the kernel's address. */
	for (i = 0; i < vaddr_to_pgd_index(lg->page_offset); i++)
		release_pgd(lg, lg->pgdirs[idx].pgdir + i);
}

/* The Guest also has a hypercall to do this manually: it's used when a large
 * number of mappings have been changed. */
void guest_pagetable_flush_user(struct lguest *lg)
{
	/* Drop the userspace part of the current page table. */
	flush_user_mappings(lg, lg->pgdidx);
}
/*:*/

/* We keep several page tables.  This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us. */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
	unsigned int i;
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].cr3 == pgtable)
			break;
	return i;
}
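
/* Note the convention: "not found" is signalled by returning
 * ARRAY_SIZE(lg->pgdirs), one past the last valid index.  Callers follow this
 * pattern (editor's sketch of the idiom used below):
 *
 *	unsigned int i = find_pgdir(lg, cr3);
 *	if (i == ARRAY_SIZE(lg->pgdirs))
 *		... no shadow cached for this cr3 ...
 *	else
 *		... lg->pgdirs[i] is the one we want ...
 */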

/*H:435 And this is us, creating the new page directory.  If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir. */
static unsigned int new_pgdir(struct lguest *lg,
			      unsigned long cr3,
			      int *blank_pgdir)
{
	unsigned int next;

	/* We pick one entry at random to throw out.  Choosing the Least
	 * Recently Used might be better, but this is easy. */
	next = random32() % ARRAY_SIZE(lg->pgdirs);
	/* If it's never been allocated at all before, try now. */
	if (!lg->pgdirs[next].pgdir) {
		lg->pgdirs[next].pgdir = (spgd_t *)get_zeroed_page(GFP_KERNEL);
		/* If the allocation fails, just keep using the one we have */
		if (!lg->pgdirs[next].pgdir)
			next = lg->pgdidx;
		else
			/* This is a blank page, so there are no kernel
			 * mappings: caller must map the stack! */
			*blank_pgdir = 1;
	}
	/* Record which Guest toplevel this shadows. */
	lg->pgdirs[next].cr3 = cr3;
	/* Release all the non-kernel mappings. */
	flush_user_mappings(lg, next);

	return next;
}

/*H:430 (iv) Switching page tables
 *
 * This is what happens when the Guest changes page tables (ie. changes the
 * top-level pgdir).  This happens on almost every context switch. */
void guest_new_pagetable(struct lguest *lg, unsigned long pgtable)
{
	int newpgdir, repin = 0;

	/* Look to see if we have this one already. */
	newpgdir = find_pgdir(lg, pgtable);
	/* If not, we allocate or mug an existing one: if it's a fresh one,
	 * repin gets set to 1. */
	if (newpgdir == ARRAY_SIZE(lg->pgdirs))
		newpgdir = new_pgdir(lg, pgtable, &repin);
	/* Change the current pgd index to the new one. */
	lg->pgdidx = newpgdir;
	/* If it was completely blank, we map in the Guest kernel stack */
	if (repin)
		pin_stack_pages(lg);
}

/*H:470 Finally, a routine which throws away everything: all PGD entries in
 * all the shadow page tables.  This is used when we destroy the Guest. */
static void release_all_pagetables(struct lguest *lg)
{
	unsigned int i, j;

	/* Every shadow pagetable this Guest has */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir)
			/* Every PGD entry except the Switcher at the top */
			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
				release_pgd(lg, lg->pgdirs[i].pgdir + j);
}

/* We also throw away everything when a Guest tells us it's changed a kernel
 * mapping.  Since kernel mappings are in every page table, it's easiest to
 * throw them all away.  This is amazingly slow, but thankfully rare. */
void guest_pagetable_clear_all(struct lguest *lg)
{
	release_all_pagetables(lg);
	/* We need the Guest kernel stack mapped again. */
	pin_stack_pages(lg);
}

/*H:420 This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in.  We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway.  This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void do_set_pte(struct lguest *lg, int idx,
		       unsigned long vaddr, gpte_t gpte)
{
	/* Look up the matching shadow page directory entry. */
	spgd_t *spgd = spgd_addr(lg, idx, vaddr);

	/* If the top level isn't present, there's no entry to update. */
	if (spgd->flags & _PAGE_PRESENT) {
		/* Otherwise, we start by releasing the existing entry. */
		spte_t *spte = spte_addr(lg, *spgd, vaddr);
		release_pte(*spte);

		/* If they're setting this entry as dirty or accessed, we might
		 * as well put that entry they've given us in now.  This shaves
		 * 10% off a copy-on-write micro-benchmark. */
		if (gpte.flags & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
			check_gpte(lg, gpte);
			*spte = gpte_to_spte(lg, gpte, gpte.flags&_PAGE_DIRTY);
		} else
			/* Otherwise we can demand_page() it in later. */
			spte->raw.val = 0;
	}
}
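
/* Summarizing the three cases above (editor's recap, nothing new):
 *
 *	Guest PTE flags			what we install
 *	---------------			---------------
 *	_PAGE_DIRTY set			a writable shadow PTE, right now
 *	_PAGE_ACCESSED only		a read-only shadow PTE, right now
 *	neither				nothing: demand_page() fills it in
 */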

/*H:410 Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few).  Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes.  So when the page table above that address changes, we update
 * all the page tables, not just the current one.  This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings.  This speeds up context switch immensely. */
void guest_set_pte(struct lguest *lg,
		   unsigned long cr3, unsigned long vaddr, gpte_t gpte)
{
	/* Kernel mappings must be changed on all top levels.  Slow, but
	 * doesn't happen often. */
	if (vaddr >= lg->page_offset) {
		unsigned int i;
		for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
			if (lg->pgdirs[i].pgdir)
				do_set_pte(lg, i, vaddr, gpte);
	} else {
		/* Is this page table one we have a shadow for? */
		int pgdir = find_pgdir(lg, cr3);
		if (pgdir != ARRAY_SIZE(lg->pgdirs))
			/* If so, do the update. */
			do_set_pte(lg, pgdir, vaddr, gpte);
	}
}

/*H:400
 * (iii) Setting up a page table entry when the Guest tells us it has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed.  When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry:
 */
void guest_set_pmd(struct lguest *lg, unsigned long cr3, u32 idx)
{
	int pgdir;

	/* The kernel seems to try to initialize this early on: we ignore its
	 * attempts to map over the Switcher. */
	if (idx >= SWITCHER_PGD_INDEX)
		return;

	/* If they're talking about a page table we have a shadow for... */
	pgdir = find_pgdir(lg, cr3);
	if (pgdir < ARRAY_SIZE(lg->pgdirs))
		/* ... throw it away. */
		release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
}

/*H:500 (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, the Launcher tells us where the toplevel of
 * its first page table is.  We set some things up here: */
int init_guest_pagetable(struct lguest *lg, unsigned long pgtable)
{
	/* In flush_user_mappings() we loop from 0 to
	 * "vaddr_to_pgd_index(lg->page_offset)".  This assumes it won't hit
	 * the Switcher mappings, so check that now. */
	if (vaddr_to_pgd_index(lg->page_offset) >= SWITCHER_PGD_INDEX)
		return -EINVAL;
	/* We start on the first shadow page table, and give it a blank PGD
	 * page. */
	lg->pgdidx = 0;
	lg->pgdirs[lg->pgdidx].cr3 = pgtable;
	lg->pgdirs[lg->pgdidx].pgdir = (spgd_t*)get_zeroed_page(GFP_KERNEL);
	if (!lg->pgdirs[lg->pgdidx].pgdir)
		return -ENOMEM;
	return 0;
}

/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
	unsigned int i;

	/* Throw away all page table pages. */
	release_all_pagetables(lg);
	/* Now free the top levels: free_page() can handle 0 just fine. */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		free_page((long)lg->pgdirs[i].pgdir);
}

/*H:480 (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be available to the
 * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
 * for each CPU already set up, we just need to hook them in. */
void map_switcher_in_guest(struct lguest *lg, struct lguest_pages *pages)
{
	spte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
	spgd_t switcher_pgd;
	spte_t regs_pte;

	/* Make the last PGD entry for this Guest point to the Switcher's PTE
	 * page for this CPU (with appropriate flags). */
	switcher_pgd.pfn = __pa(switcher_pte_page) >> PAGE_SHIFT;
	switcher_pgd.flags = _PAGE_KERNEL;
	lg->pgdirs[lg->pgdidx].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;

	/* We also change the Switcher PTE page.  When we're running the Guest,
	 * we want the Guest's "regs" page to appear where the first Switcher
	 * page for this CPU is.  This is an optimization: when the Switcher
	 * saves the Guest registers, it saves them into the first page of this
	 * CPU's "struct lguest_pages": if we make sure the Guest's register
	 * page is already mapped there, we don't have to copy them out
	 * again. */
	regs_pte.pfn = __pa(lg->regs_page) >> PAGE_SHIFT;
	regs_pte.flags = _PAGE_KERNEL;
	switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTES_PER_PAGE]
		= regs_pte;
}
/*:*/
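
/* That last index expression deserves a worked example (editor's sketch,
 * using a made-up address).  "pages" is the virtual address of this CPU's
 * "struct lguest_pages", which lives in the Switcher's top 4MB, so dividing
 * by PAGE_SIZE and taking the remainder mod PTES_PER_PAGE gives its slot in
 * the Switcher PTE page.  Say CPU 0's "struct lguest_pages" sat at
 * 0xFFC01000:
 *
 *	0xFFC01000 / 4096 == 0xFFC01, and 0xFFC01 % 1024 == 1
 *
 * which is exactly where populate_switcher_pte_page() below puts CPU 0's
 * first (registers) page. */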

static void free_switcher_pte_pages(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		free_page((long)switcher_pte_page(i));
}

/*H:520 Setting up the Switcher PTE page for a given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher code itself.
 *
 * Currently the Switcher is less than a page long, so "pages" is always 1. */
static __init void populate_switcher_pte_page(unsigned int cpu,
					      struct page *switcher_page[],
					      unsigned int pages)
{
	unsigned int i;
	spte_t *pte = switcher_pte_page(cpu);

	/* The first entries are easy: they map the Switcher code. */
	for (i = 0; i < pages; i++) {
		pte[i].pfn = page_to_pfn(switcher_page[i]);
		pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
	}

	/* The only other thing we map is this CPU's pair of pages. */
	i = pages + cpu*2;

	/* First page (Guest registers) is writable from the Guest */
	pte[i].pfn = page_to_pfn(switcher_page[i]);
	pte[i].flags = _PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW;
	/* The second page contains the "struct lguest_ro_state", and is
	 * read-only. */
	pte[i+1].pfn = page_to_pfn(switcher_page[i+1]);
	pte[i+1].flags = _PAGE_PRESENT|_PAGE_ACCESSED;
}
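
/* So with pages == 1, the layout of each CPU's Switcher PTE page is
 * (editor's recap of the code above):
 *
 *	entry 0			the Switcher code
 *	entry 1 + cpu*2		that CPU's registers page (writable)
 *	entry 2 + cpu*2		that CPU's "struct lguest_ro_state" (read-only)
 *
 * e.g. CPU 3's pair lands in entries 7 and 8. */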

/*H:510 At boot or module load time, init_pagetables() allocates and populates
 * the Switcher PTE page for each CPU. */
__init int init_pagetables(struct page **switcher_page, unsigned int pages)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		switcher_pte_page(i) = (spte_t *)get_zeroed_page(GFP_KERNEL);
		if (!switcher_pte_page(i)) {
			free_switcher_pte_pages();
			return -ENOMEM;
		}
		populate_switcher_pte_page(i, switcher_page, pages);
	}
	return 0;
}
/*:*/

/* Cleaning up simply involves freeing the PTE page for each CPU. */
void free_pagetables(void)
{
	free_switcher_pte_pages();
}