/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include "xen-ops.h"
#include "vdso.h"
#include "p2m.h"
#include "mmu.h"

/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
#ifdef CONFIG_X86_64
extern asmlinkage void nmi(void);
#endif
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
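/*
 * REMAP_SIZE leaves room for the three bookkeeping members of the struct
 * below, so the whole buffer (3 + REMAP_SIZE unsigned longs) fills exactly
 * one page.
 */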
#define REMAP_SIZE (P2M_PER_PAGE - 3)
static struct {
	unsigned long next_area_mfn;
	unsigned long target_pfn;
	unsigned long size;
	unsigned long mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)
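/*
 * For example, a domain booted with 1 GiB of initial memory will be told
 * about at most roughly 10 GiB of extra (ballooned-out) space this way.
 */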

static void __init xen_add_extra_mem(u64 start, u64 size)
{
	unsigned long pfn;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);

	xen_max_p2m_pfn = PFN_DOWN(start + size);
	for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		if (WARN_ONCE(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
			continue;
		WARN_ONCE(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
			  pfn, mfn);

		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(
	const struct e820entry *list, size_t map_size,
	unsigned long *min_pfn)
{
	const struct e820entry *entry;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn < *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

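/*
 * Release a single page (identified by its mfn) back to the hypervisor
 * via a one-extent XENMEM_decrease_reservation call.
 */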
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
	unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
	unsigned long *released)
{
	unsigned long len = 0;
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
			len++;
		} else
			break;
	}

	/* Need to release pages first */
	*released += len;
	*identity += set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned long ident_cnt = 0;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	/* Don't use memory until remapped */
	memblock_reserve(PFN_PHYS(remap_pfn), PFN_PHYS(size));

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		ident_cnt += set_phys_range_identity(ident_pfn_iter,
			ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	const struct e820entry *list, size_t map_size, unsigned long start_pfn,
	unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
	unsigned long *identity, unsigned long *released)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			*identity += set_phys_range_identity(cur_pfn,
				cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(list, map_size,
						      &remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
				cur_pfn + left, nr_pages, identity, released);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
		*identity += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}

static unsigned long __init xen_set_identity_and_remap(
	const struct e820entry *list, size_t map_size, unsigned long nr_pages,
	unsigned long *released)
{
	phys_addr_t start = 0;
	unsigned long identity = 0;
	unsigned long last_pfn = nr_pages;
	const struct e820entry *entry;
	unsigned long num_released = 0;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * remap the memory in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0, entry = list; i < map_size; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == map_size - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				last_pfn = xen_set_identity_and_remap_chunk(
						list, map_size, start_pfn,
						end_pfn, nr_pages, last_pfn,
						&identity, &num_released);
			start = end;
		}
	}

	*released = num_released;

	pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
	pr_info("Released %ld page(s)\n", num_released);

	return last_pfn;
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn to remap to which pfn) is contained in the
 * to be remapped memory itself in a linked list anchored at xen_remap_mfn.
 * This scheme allows the different chunks to be remapped in arbitrary order
 * while the resulting mapping remains independent of that order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
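		/*
		 * The remapped-to ranges were memblock_reserve()d in
		 * xen_do_set_identity_and_remap_chunk(); free them now.
		 * Adjacent chunks (the list is walked in reverse order of
		 * creation) are merged so that memblock_free() is called
		 * once per contiguous range.
		 */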
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			memblock_free(PFN_PHYS(pfn_s), PFN_PHYS(len));
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		mfn = xen_remap_mfn;
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		memblock_free(PFN_PHYS(pfn_s), PFN_PHYS(len));

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages = MAX_DOMAIN_PAGES;
	domid_t domid = DOMID_SELF;
	int ret;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, MAX_DOMAIN_PAGES);
}

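/*
 * Add a region to the kernel e820 map, trimming RAM regions to whole
 * pages so that no partial page is reported as usable RAM.
 */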
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
	u64 end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((u64)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}

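/*
 * Treat E820_UNUSABLE regions as RAM; see the comment at the call site
 * in xen_memory_setup() for the reasoning.
 */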
void xen_ignore_unusable(struct e820entry *list, size_t map_size)
{
	struct e820entry *entry;
	unsigned int i;

	for (i = 0, entry = list; i < map_size; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}

/**
 * xen_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	unsigned long max_pfn = xen_start_info->nr_pages;
	unsigned long long mem_end;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long last_pfn = 0;
	unsigned long extra_pages = 0;
	int i;
	int op;

	max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		map[0].addr = 0ULL;
		map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		map[0].size += 8ULL << 20;
		map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable(map, memmap.nr_entries);

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

	max_pages = xen_get_max_pages();
	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	last_pfn = xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
					      &xen_released_pages);

	extra_pages += xen_released_pages;

	if (last_pfn > max_pfn) {
		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
		mem_end = PFN_PHYS(max_pfn);
	}
	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			  extra_pages);
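	/*
	 * Rewrite the e820 map: RAM below mem_end stays RAM, RAM above it
	 * is registered with xen_add_extra_mem() (to be populated later by
	 * the balloon driver) until extra_pages is used up, and whatever
	 * remains is marked unusable.
	 */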
	i = 0;
	while (i < memmap.nr_entries) {
		u64 addr = map[i].addr;
		u64 size = map[i].size;
		u32 type = map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				size = min(size, mem_end - addr);
			} else if (extra_pages) {
				size = min(size, (u64)extra_pages * PAGE_SIZE);
				extra_pages -= size / PAGE_SIZE;
				xen_add_extra_mem(addr, size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, size, type);

		map[i].addr += size;
		map[i].size -= size;
		if (map[i].size == 0)
			i++;
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 *
	 * PFNs above MAX_P2M_PFN are considered identity mapped as
	 * well.
	 */
	set_phys_range_identity(map[i-1].addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	/*
	 * Reserve Xen bits:
	 *  - mfn_list
	 *  - xen_start_info
	 * See comment above "struct start_info" in <xen/interface/xen.h>
	 * We tried to make the memblock_reserve more selective so
	 * that it would be clear what region is reserved. Sadly we ran
	 * into the problem wherein on a 64-bit hypervisor with a 32-bit
	 * initial domain, the pt_base has the cr3 value which is not
	 * necessarily where the pagetable starts! As Jan put it: "
	 * Actually, the adjustment turns out to be correct: The page
	 * tables for a 32-on-64 dom0 get allocated in the order "first L1",
	 * "first L2", "first L3", so the offset to the page table base is
	 * indeed 2. When reading xen/include/public/xen.h's comment
	 * very strictly, this is not a violation (since there nothing is said
	 * that the first thing in the page table space is pointed to by
	 * pt_base; I admit that this seems to be implied though, namely
	 * do I think that it is implied that the page table space is the
	 * range [pt_base, pt_base + nr_pt_frames), whereas that
	 * range here indeed is [pt_base - 2, pt_base - 2 + nr_pt_frames),
	 * which - without a priori knowledge - the kernel would have
	 * difficulty to figure out)." - so let's just fall back to the
	 * easy way and reserve the whole region.
	 */
	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	return "Xen";
}

/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	static struct e820entry map[E820MAX] __initdata;

	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);

	for (i = 0; i < memmap.nr_entries; i++)
		e820_add_region(map[i].addr, map[i].size, map[i].type);

	memblock_reserve(__pa(xen_start_info->mfn_list),
			 xen_start_info->pt_base - xen_start_info->mfn_list);

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	/*
	 * This could be called before selected_vdso32 is initialized, so
	 * just fiddle with both possible images.  vdso_image_32_syscall
	 * can't be selected, since it only exists on 64-bit systems.
	 */
	u32 *mask;
	mask = vdso_image_32_int80.data +
		vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = vdso_image_32_sysenter.data +
		vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}