/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include "xen-ops.h"
#include "vdso.h"

/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)

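/*
 * Illustrative example (figures assume 4 KiB pages and a 64-bit build
 * where MAXMEM is not the limiting factor): a domain started with
 * nr_pages = 131072 (512 MiB) has its extra region clamped by
 * xen_memory_setup() to at most EXTRA_MEM_RATIO * 131072 = 1310720
 * pages, i.e. 5 GiB.
 */
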
static void __init xen_add_extra_mem(u64 start, u64 size)
{
	unsigned long pfn;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);

	xen_max_p2m_pfn = PFN_DOWN(start + size);

	for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++)
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
}

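/*
 * Release the machine frames backing the pfn range [start, end) back to
 * the hypervisor (XENMEM_decrease_reservation), marking each released
 * pfn as INVALID_P2M_ENTRY.  Returns the number of pages released.
 */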
static unsigned long __init xen_release_chunk(unsigned long start,
					      unsigned long end)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	for (pfn = start; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		set_xen_guest_handle(reservation.extent_start, &mfn);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
					   &reservation);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
		if (ret == 1) {
			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
			len++;
		}
	}
	if (len)
		printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
		       start, end, len);

	return len;
}
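
/*
 * The counterpart of xen_release_chunk(): for each pfn in [start, end)
 * that currently has no machine frame, request one from the hypervisor
 * (XENMEM_populate_physmap) and record it in the p2m.  If recording the
 * mapping fails, the freshly granted frame is handed straight back.
 * Returns the number of pages successfully populated.
 */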
static unsigned long __init xen_populate_physmap(unsigned long start,
						 unsigned long end)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};
	unsigned long len = 0;
	unsigned long pfn;
	int ret;

	for (pfn = start; pfn < end; pfn++) {
		unsigned long frame;

		/* Make sure pfn does not exist to start with */
		if (pfn_to_mfn(pfn) != INVALID_P2M_ENTRY)
			continue;

		frame = pfn;
		set_xen_guest_handle(reservation.extent_start, &frame);
		reservation.nr_extents = 1;

		ret = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
		WARN(ret != 1, "Failed to populate pfn %lx err=%d\n", pfn, ret);
		if (ret == 1) {
			if (!early_set_phys_to_machine(pfn, frame)) {
				/* Could not record the p2m entry; hand the
				 * frame back and stop populating. */
				set_xen_guest_handle(reservation.extent_start, &frame);
				reservation.nr_extents = 1;
				ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
							   &reservation);
				break;
			}
			len++;
		} else
			break;
	}
	if (len)
		printk(KERN_INFO "Populated %lx-%lx pfn range: %lu pages added\n",
		       start, end, len);
	return len;
}
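
/*
 * Walk the E820 map and use up to credits_left of the previously
 * released pages to populate RAM regions lying above the initial
 * allocation of xen_start_info->nr_pages pages.  *last_pfn is set to
 * the highest pfn populated; returns the number of pages added.
 */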
static unsigned long __init xen_populate_chunk(
	const struct e820entry *list, size_t map_size,
	unsigned long max_pfn, unsigned long *last_pfn,
	unsigned long credits_left)
{
	const struct e820entry *entry;
	unsigned int i;
	unsigned long done = 0;
	unsigned long dest_pfn;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		unsigned long credits = credits_left;
		unsigned long s_pfn;
		unsigned long e_pfn;
		unsigned long pfns;
		long capacity;

		if (credits <= 0)
			break;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_UP(entry->addr + entry->size);

		/* We only care about E820 after the xen_start_info->nr_pages */
		if (e_pfn <= max_pfn)
			continue;

		s_pfn = PFN_DOWN(entry->addr);
		/* If the E820 falls within the nr_pages, we want to start
		 * at the nr_pages PFN.
		 * If that would mean going past the E820 entry, skip it
		 */
		if (s_pfn <= max_pfn) {
			capacity = e_pfn - max_pfn;
			dest_pfn = max_pfn;
		} else {
			/* last_pfn MUST be within E820_RAM regions */
			if (*last_pfn && e_pfn >= *last_pfn)
				s_pfn = *last_pfn;
			capacity = e_pfn - s_pfn;
			dest_pfn = s_pfn;
		}
		/* If we had filled this E820_RAM entry, go to the next one. */
		if (capacity <= 0)
			continue;

		if (credits > capacity)
			credits = capacity;

		pfns = xen_populate_physmap(dest_pfn, dest_pfn + credits);
		done += pfns;
		credits_left -= pfns;
		*last_pfn = (dest_pfn + pfns);
	}
	return done;
}
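
/*
 * Set the 1:1 p2m mapping for every non-RAM region and gap in the E820
 * map, first releasing any RAM pages that fall within those ranges.
 * Returns the number of pages released.
 */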
static unsigned long __init xen_set_identity_and_release(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long released = 0;
	unsigned long identity = 0;
	const struct e820entry *entry;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * release the pages (if available) in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;

		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn) {
				if (start_pfn < nr_pages)
					released += xen_release_chunk(
						start_pfn, min(end_pfn, nr_pages));

				identity += set_phys_range_identity(
					start_pfn, end_pfn);
			}
			start = end;
		}
	}

	if (released)
		printk(KERN_INFO "Released %lu pages of unused memory\n", released);
	if (identity)
		printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);

	return released;
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}

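/*
 * Add a region to the kernel e820 map, trimming RAM regions inward to
 * whole pages so that no partially backed page is reported as usable.
 */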
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
	u64 end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((u64)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long last_pfn = 0;
	unsigned long extra_pages = 0;
	unsigned long populated;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set P2M for all non-RAM pages and E820 gaps to be identity
	 * type PFNs.  Any RAM pages that would be made inaccessible by
	 * this are first released.
	 */
	xen_released_pages = xen_set_identity_and_release(
		map, memmap.nr_entries, max_pfn);

	/*
	 * Re-populate the released page frames into the RAM regions of
	 * the E820 map that lie above the initial allocation.
	 */
	populated = xen_populate_chunk(map, memmap.nr_entries,
			max_pfn, &last_pfn, xen_released_pages);

	extra_pages += (xen_released_pages - populated);

	if (last_pfn > max_pfn) {
		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
		mem_end = PFN_PHYS(max_pfn);
	}
	/*
	 * Clamp the amount of extra memory to EXTRA_MEM_RATIO times
	 * the base size.  On non-highmem systems, the base size is the
	 * full initial memory allocation; on highmem it is limited to
	 * the max size of lowmem, so that it doesn't get completely
	 * filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask;
	mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

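/*
 * Register an entry point with the hypervisor for the given callback
 * type; events are masked on entry (CALLBACKF_mask_events).
 */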
static int __cpuinit register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void __cpuinit xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

void __init xen_arch_setup(void)
{
	xen_panic_handler_init();

	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (!xen_feature(XENFEAT_auto_translated_physmap))
		HYPERVISOR_vm_assist(VMASST_CMD_enable,
				     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
#ifdef CONFIG_X86_32
	boot_cpu_data.hlt_works_ok = 1;
#endif
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(set_pm_idle_to_default());
	fiddle_vdso();
}