/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include "xen-ops.h"
#include "vdso.h"
#include "p2m.h"
#include "mmu.h"

/* These are code, but not functions.  Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
#ifdef CONFIG_X86_64
extern asmlinkage void nmi(void);
#endif
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
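/*
 * REMAP_SIZE is chosen so that the three bookkeeping fields of the struct
 * below plus its mfns[] array fill exactly one page: P2M_PER_PAGE is the
 * number of unsigned longs that fit in a page.
 */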
static struct {
        unsigned long   next_area_mfn;
        unsigned long   target_pfn;
        unsigned long   size;
        unsigned long   mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)

static void __init xen_add_extra_mem(u64 start, u64 size)
{
        unsigned long pfn;
        int i;

        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                /* Add new region. */
                if (xen_extra_mem[i].size == 0) {
                        xen_extra_mem[i].start = start;
                        xen_extra_mem[i].size = size;
                        break;
                }
                /* Append to existing region. */
                if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
                        xen_extra_mem[i].size += size;
                        break;
                }
        }
        if (i == XEN_EXTRA_MEM_MAX_REGIONS)
                printk(KERN_WARNING "Warning: not enough extra memory regions\n");

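        /*
         * Keep the range out of the early allocator; the balloon driver
         * populates it with usable memory later on.
         */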
        memblock_reserve(start, size);

        xen_max_p2m_pfn = PFN_DOWN(start + size);
        for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);

                if (WARN_ONCE(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
                        continue;
                WARN_ONCE(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
                          pfn, mfn);

                __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
        }
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(
        const struct e820entry *list, size_t map_size,
        unsigned long *min_pfn)
{
        const struct e820entry *entry;
        unsigned int i;
        unsigned long done = 0;

        for (i = 0, entry = list; i < map_size; i++, entry++) {
                unsigned long s_pfn;
                unsigned long e_pfn;

                if (entry->type != E820_RAM)
                        continue;

                e_pfn = PFN_DOWN(entry->addr + entry->size);

                /* We only care about E820 entries at or above min_pfn */
                if (e_pfn < *min_pfn)
                        continue;

                s_pfn = PFN_UP(entry->addr);

                /* If min_pfn falls within the E820 entry, we want to start
                 * at the min_pfn PFN.
                 */
                if (s_pfn <= *min_pfn) {
                        done = e_pfn - *min_pfn;
                } else {
                        done = e_pfn - s_pfn;
                        *min_pfn = s_pfn;
                }
                break;
        }

        return done;
}

static int __init xen_free_mfn(unsigned long mfn)
{
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
                .extent_order = 0,
                .domid = DOMID_SELF
        };

        set_xen_guest_handle(reservation.extent_start, &mfn);
        reservation.nr_extents = 1;

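        /* The hypercall returns the number of extents released: 1 on success. */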
        return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then sets up the identity map for it.
 * It is used as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
        unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
        unsigned long *released)
{
        unsigned long len = 0;
        unsigned long pfn, end;
        int ret;

        WARN_ON(start_pfn > end_pfn);

        end = min(end_pfn, nr_pages);
        for (pfn = start_pfn; pfn < end; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);

                /* Make sure pfn exists to start with */
                if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
                        continue;

                ret = xen_free_mfn(mfn);
                WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

                if (ret == 1) {
                        if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
                                break;
                        len++;
                } else
                        break;
        }

        /* Pages were released; account for them before setting the 1:1 map. */
        *released += len;
        *identity += set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
        struct mmu_update update = {
                .ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
                .val = pfn
        };

        /* Update p2m */
        if (!set_phys_to_machine(pfn, mfn)) {
                WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
                     pfn, mfn);
                BUG();
        }

        /* Update m2p */
        if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
                WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
                     mfn, pfn);
                BUG();
        }

        /* Update kernel mapping, but not for highmem. */
        if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
                return;

        if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
                                         mfn_pte(mfn, PAGE_KERNEL), 0)) {
                WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
                     mfn, pfn);
                BUG();
        }
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares to remap the underlying RAM of the
 * original allocation to remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while deferring the
 * actual remapping until the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts; see 'xen_remap_memory'
 * and its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
        unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
        unsigned long buf = (unsigned long)&xen_remap_buf;
        unsigned long mfn_save, mfn;
        unsigned long ident_pfn_iter, remap_pfn_iter;
        unsigned long ident_end_pfn = start_pfn + size;
        unsigned long left = size;
        unsigned long ident_cnt = 0;
        unsigned int i, chunk;

        WARN_ON(size == 0);

        BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

        /* Don't use memory until remapped */
        memblock_reserve(PFN_PHYS(remap_pfn), PFN_PHYS(size));

        mfn_save = virt_to_mfn(buf);

        for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
             ident_pfn_iter < ident_end_pfn;
             ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
                chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

                /* Map first pfn to xen_remap_buf */
                mfn = pfn_to_mfn(ident_pfn_iter);
                set_pte_mfn(buf, mfn, PAGE_KERNEL);

                /* Save mapping information in page */
                xen_remap_buf.next_area_mfn = xen_remap_mfn;
                xen_remap_buf.target_pfn = remap_pfn_iter;
                xen_remap_buf.size = chunk;
                for (i = 0; i < chunk; i++)
                        xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

                /* Put remap buf into list. */
                xen_remap_mfn = mfn;

                /* Set identity map */
                ident_cnt += set_phys_range_identity(ident_pfn_iter,
                        ident_pfn_iter + chunk);

                left -= chunk;
        }

        /* Restore old xen_remap_buf mapping */
        set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
        const struct e820entry *list, size_t map_size, unsigned long start_pfn,
        unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
        unsigned long *identity, unsigned long *released)
{
        unsigned long pfn;
        unsigned long i = 0;
        unsigned long n = end_pfn - start_pfn;

        while (i < n) {
                unsigned long cur_pfn = start_pfn + i;
                unsigned long left = n - i;
                unsigned long size = left;
                unsigned long remap_range_size;

                /* Do not remap pages beyond the current allocation */
                if (cur_pfn >= nr_pages) {
                        /* Identity map remaining pages */
                        *identity += set_phys_range_identity(cur_pfn,
                                cur_pfn + size);
                        break;
                }
                if (cur_pfn + size > nr_pages)
                        size = nr_pages - cur_pfn;

                remap_range_size = xen_find_pfn_range(list, map_size,
                                                      &remap_pfn);
                if (!remap_range_size) {
                        pr_warning("Unable to find available pfn range, not remapping identity pages\n");
                        xen_set_identity_and_release_chunk(cur_pfn,
                                cur_pfn + left, nr_pages, identity, released);
                        break;
                }
                /* Adjust size to fit in current e820 RAM region */
                if (size > remap_range_size)
                        size = remap_range_size;

                xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

                /* Update variables to reflect new mappings. */
                i += size;
                remap_pfn += size;
                *identity += size;
        }

        /*
         * If the PFNs are currently mapped, the VA mapping also needs
         * to be updated to be 1:1.
         */
        for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
                (void)HYPERVISOR_update_va_mapping(
                        (unsigned long)__va(pfn << PAGE_SHIFT),
                        mfn_pte(pfn, PAGE_KERNEL_IO), 0);

        return remap_pfn;
}

static unsigned long __init xen_set_identity_and_remap(
        const struct e820entry *list, size_t map_size, unsigned long nr_pages,
        unsigned long *released)
{
        phys_addr_t start = 0;
        unsigned long identity = 0;
        unsigned long last_pfn = nr_pages;
        const struct e820entry *entry;
        unsigned long num_released = 0;
        int i;

        /*
         * Combine non-RAM regions and gaps until a RAM region (or the
         * end of the map) is reached, then set the 1:1 map and
         * remap the memory in those non-RAM regions.
         *
         * The combined non-RAM regions are rounded to a whole number
         * of pages so any partial pages are accessible via the 1:1
         * mapping.  This is needed for some BIOSes that put (for
         * example) the DMI tables in a reserved region that begins on
         * a non-page boundary.
         */
        for (i = 0, entry = list; i < map_size; i++, entry++) {
                phys_addr_t end = entry->addr + entry->size;
                if (entry->type == E820_RAM || i == map_size - 1) {
                        unsigned long start_pfn = PFN_DOWN(start);
                        unsigned long end_pfn = PFN_UP(end);

                        if (entry->type == E820_RAM)
                                end_pfn = PFN_UP(entry->addr);

                        if (start_pfn < end_pfn)
                                last_pfn = xen_set_identity_and_remap_chunk(
                                                list, map_size, start_pfn,
                                                end_pfn, nr_pages, last_pfn,
                                                &identity, &num_released);
                        start = end;
                }
        }

        *released = num_released;

        pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
        pr_info("Released %ld page(s)\n", num_released);

        return last_pfn;
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained
 * in the to-be-remapped memory itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows the chunks to be remapped in arbitrary
 * order while the resulting mapping stays independent of that order.
 */
void __init xen_remap_memory(void)
{
        unsigned long buf = (unsigned long)&xen_remap_buf;
        unsigned long mfn_save, mfn, pfn;
        unsigned long remapped = 0;
        unsigned int i;
        unsigned long pfn_s = ~0UL;
        unsigned long len = 0;

        mfn_save = virt_to_mfn(buf);

        while (xen_remap_mfn != INVALID_P2M_ENTRY) {
                /* Map the remap information */
                set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

                BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

                pfn = xen_remap_buf.target_pfn;
                for (i = 0; i < xen_remap_buf.size; i++) {
                        mfn = xen_remap_buf.mfns[i];
                        xen_update_mem_tables(pfn, mfn);
                        remapped++;
                        pfn++;
                }
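                /*
                 * Coalesce adjacent target ranges so memblock_free() is
                 * called once per contiguous range instead of once per
                 * remap buffer.
                 */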
                if (pfn_s == ~0UL || pfn == pfn_s) {
                        pfn_s = xen_remap_buf.target_pfn;
                        len += xen_remap_buf.size;
                } else if (pfn_s + len == xen_remap_buf.target_pfn) {
                        len += xen_remap_buf.size;
                } else {
                        memblock_free(PFN_PHYS(pfn_s), PFN_PHYS(len));
                        pfn_s = xen_remap_buf.target_pfn;
                        len = xen_remap_buf.size;
                }

                mfn = xen_remap_mfn;
                xen_remap_mfn = xen_remap_buf.next_area_mfn;
        }

        if (pfn_s != ~0UL && len)
                memblock_free(PFN_PHYS(pfn_s), PFN_PHYS(len));

        set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

        pr_info("Remapped %ld page(s)\n", remapped);
}

static unsigned long __init xen_get_max_pages(void)
{
        unsigned long max_pages = MAX_DOMAIN_PAGES;
        domid_t domid = DOMID_SELF;
        int ret;

        /*
         * For the initial domain we use the maximum reservation as
         * the maximum number of pages.
         *
         * For guest domains the current maximum reservation reflects
         * the current maximum rather than the static maximum. In this
         * case the e820 map provided to us will cover the static
         * maximum region.
         */
        if (xen_initial_domain()) {
                ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
                if (ret > 0)
                        max_pages = ret;
        }

        return min(max_pages, MAX_DOMAIN_PAGES);
}

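/*
 * Trim RAM regions inward to page boundaries before adding them to the
 * e820 map, so a partial page at either end is never reported as usable RAM.
 */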
static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
{
        u64 end = start + size;

        /* Align RAM regions to page boundaries. */
        if (type == E820_RAM) {
                start = PAGE_ALIGN(start);
                end &= ~((u64)PAGE_SIZE - 1);
        }

        e820_add_region(start, end - start, type);
}

void xen_ignore_unusable(struct e820entry *list, size_t map_size)
{
        struct e820entry *entry;
        unsigned int i;

        for (i = 0, entry = list; i < map_size; i++, entry++) {
                if (entry->type == E820_UNUSABLE)
                        entry->type = E820_RAM;
        }
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
        static struct e820entry map[E820MAX] __initdata;

        unsigned long max_pfn = xen_start_info->nr_pages;
        unsigned long long mem_end;
        int rc;
        struct xen_memory_map memmap;
        unsigned long max_pages;
        unsigned long last_pfn = 0;
        unsigned long extra_pages = 0;
        int i;
        int op;

        max_pfn = min(MAX_DOMAIN_PAGES, max_pfn);
        mem_end = PFN_PHYS(max_pfn);

        memmap.nr_entries = E820MAX;
        set_xen_guest_handle(memmap.buffer, map);

        op = xen_initial_domain() ?
                XENMEM_machine_memory_map :
                XENMEM_memory_map;
        rc = HYPERVISOR_memory_op(op, &memmap);
        if (rc == -ENOSYS) {
                BUG_ON(xen_initial_domain());
                memmap.nr_entries = 1;
                map[0].addr = 0ULL;
                map[0].size = mem_end;
                /* 8MB slack (to balance backend allocations). */
                map[0].size += 8ULL << 20;
                map[0].type = E820_RAM;
                rc = 0;
        }
        BUG_ON(rc);
        BUG_ON(memmap.nr_entries == 0);

        /*
         * Xen won't allow a 1:1 mapping to be created to UNUSABLE
         * regions, so if we're using the machine memory map leave the
         * region as RAM as it is in the pseudo-physical map.
         *
         * UNUSABLE regions in domUs are not handled and will need
         * a patch in the future.
         */
        if (xen_initial_domain())
                xen_ignore_unusable(map, memmap.nr_entries);

        /* Make sure the Xen-supplied memory map is well-ordered. */
        sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);

        max_pages = xen_get_max_pages();
        if (max_pages > max_pfn)
                extra_pages += max_pages - max_pfn;

        /*
         * Set identity map on non-RAM pages and prepare remapping the
         * underlying RAM.
         */
        last_pfn = xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
                                              &xen_released_pages);

        extra_pages += xen_released_pages;

        if (last_pfn > max_pfn) {
                max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
                mem_end = PFN_PHYS(max_pfn);
        }
        /*
         * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
         * multiple of the base size.  On non-highmem systems, the base
         * size is the full initial memory allocation; on highmem it
         * is limited to the max size of lowmem, so that it doesn't
         * get completely filled.
         *
         * In principle there could be a problem in lowmem systems if
         * the initial memory is also very large with respect to
         * lowmem, but we won't try to deal with that here.
         */
        extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
                          extra_pages);
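        /*
         * Rewrite the e820 map: RAM below mem_end stays RAM, RAM above it
         * is handed to xen_add_extra_mem() while the extra_pages budget
         * lasts, and whatever remains is marked unusable.
         */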
        i = 0;
        while (i < memmap.nr_entries) {
                u64 addr = map[i].addr;
                u64 size = map[i].size;
                u32 type = map[i].type;

                if (type == E820_RAM) {
                        if (addr < mem_end) {
                                size = min(size, mem_end - addr);
                        } else if (extra_pages) {
                                size = min(size, (u64)extra_pages * PAGE_SIZE);
                                extra_pages -= size / PAGE_SIZE;
                                xen_add_extra_mem(addr, size);
                        } else
                                type = E820_UNUSABLE;
                }

                xen_align_and_add_e820_region(addr, size, type);

                map[i].addr += size;
                map[i].size -= size;
                if (map[i].size == 0)
                        i++;
        }

        /*
         * Set the rest as identity mapped, in case PCI BARs are
         * located here.
         *
         * PFNs above MAX_P2M_PFN are considered identity mapped as
         * well.
         */
        set_phys_range_identity(map[i-1].addr / PAGE_SIZE, ~0ul);

        /*
         * In domU, the ISA region is normal, usable memory, but we
         * reserve ISA memory anyway because too many things poke
         * about in there.
         */
        e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
                        E820_RESERVED);

        /*
         * Reserve Xen bits:
         *  - mfn_list
         *  - xen_start_info
         * See comment above "struct start_info" in <xen/interface/xen.h>
         * We tried to make the memblock_reserve more selective so
         * that it would be clear what region is reserved. Sadly we ran
         * into the problem wherein on a 64-bit hypervisor with a 32-bit
         * initial domain, the pt_base has the cr3 value which is not
         * necessarily where the pagetable starts! As Jan put it: "
         * Actually, the adjustment turns out to be correct: The page
         * tables for a 32-on-64 dom0 get allocated in the order "first L1",
         * "first L2", "first L3", so the offset to the page table base is
         * indeed 2. When reading xen/include/public/xen.h's comment
         * very strictly, this is not a violation (since there nothing is said
         * that the first thing in the page table space is pointed to by
         * pt_base; I admit that this seems to be implied though, namely
         * do I think that it is implied that the page table space is the
         * range [pt_base, pt_base + nr_pt_frames), whereas that
         * range here indeed is [pt_base - 2, pt_base - 2 + nr_pt_frames),
         * which - without a priori knowledge - the kernel would have
         * difficulty to figure out)." - so let's just fall back to the
         * easy way and reserve the whole region.
         */
        memblock_reserve(__pa(xen_start_info->mfn_list),
                         xen_start_info->pt_base - xen_start_info->mfn_list);

        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

        return "Xen";
}

/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
        static struct e820entry map[E820MAX] __initdata;

        struct xen_memory_map memmap;
        int i;
        int rc;

        memmap.nr_entries = E820MAX;
        set_xen_guest_handle(memmap.buffer, map);

        rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
        if (rc < 0)
                panic("No memory map (%d)\n", rc);

        sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);

        for (i = 0; i < memmap.nr_entries; i++)
                e820_add_region(map[i].addr, map[i].size, map[i].type);

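        /* Reserve the start-of-day mfn_list and page tables, as in xen_memory_setup(). */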
        memblock_reserve(__pa(xen_start_info->mfn_list),
                         xen_start_info->pt_base - xen_start_info->mfn_list);

        return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
        /*
         * This could be called before selected_vdso32 is initialized, so
         * just fiddle with both possible images.  vdso_image_32_syscall
         * can't be selected, since it only exists on 64-bit systems.
         */
        u32 *mask;
        mask = vdso_image_32_int80.data +
                vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
        mask = vdso_image_32_sysenter.data +
                vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
        *mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

static int register_callback(unsigned type, const void *func)
{
        struct callback_register callback = {
                .type = type,
                .address = XEN_CALLBACK(__KERNEL_CS, func),
                .flags = CALLBACKF_mask_events,
        };

        return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void xen_enable_sysenter(void)
{
        int ret;
        unsigned sysenter_feature;

#ifdef CONFIG_X86_32
        sysenter_feature = X86_FEATURE_SEP;
#else
        sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

        if (!boot_cpu_has(sysenter_feature))
                return;

        ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
        if (ret != 0)
                setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
        int ret;

        ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
        if (ret != 0) {
                printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
                /* Pretty fatal; 64-bit userspace has no other
                   mechanism for syscalls. */
        }

        if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
                ret = register_callback(CALLBACKTYPE_syscall32,
                                        xen_syscall32_target);
                if (ret != 0)
                        setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
        }
#endif /* CONFIG_X86_64 */
}

void __init xen_pvmmu_arch_setup(void)
{
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

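        /*
         * Extended cr3 support lets a 32-bit PAE guest hold its top-level
         * page table above 4GiB.
         */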
        HYPERVISOR_vm_assist(VMASST_CMD_enable,
                             VMASST_TYPE_pae_extended_cr3);

        if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
            register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
                BUG();

        xen_enable_sysenter();
        xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
        xen_panic_handler_init();
        if (!xen_feature(XENFEAT_auto_translated_physmap))
                xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
        if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
                printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
                disable_acpi();
        }
#endif

        memcpy(boot_command_line, xen_start_info->cmd_line,
               MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
               COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

        /* Set up idle, making sure it calls safe_halt() pvop */
        disable_cpuidle();
        disable_cpufreq();
        WARN_ON(xen_set_default_idle());
        fiddle_vdso();
#ifdef CONFIG_NUMA
        numa_off = 1;
#endif
}