/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "vdso.h"
#include "mmu.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* E820 map used during setting up memory. */
static struct e820entry xen_e820_map[E820MAX] __initdata;
static u32 xen_e820_map_entries __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
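/* Number of mfns fitting in the buffer page beside its three header fields. */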
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long	next_area_mfn;
	unsigned long	target_pfn;
	unsigned long	size;
	unsigned long	mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
 * The maximum amount of extra memory compared to the base size. The
 * main scaling factor is the size of struct page. At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

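/*
 * Parse the "xen_512gb_limit" option on the Xen-provided command line:
 * the bare option enables the limit, "xen_512gb_limit=<bool>" sets it
 * explicitly.
 */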
static void __init xen_parse_512gb(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
		return;

	xen_512gb_limit = val;
}

static void __init xen_add_extra_mem(phys_addr_t start, phys_addr_t size)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].size == 0) {
			xen_extra_mem[i].start = start;
			xen_extra_mem[i].size = size;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start + xen_extra_mem[i].size == start) {
			xen_extra_mem[i].size += size;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(start, size);
}

static void __init xen_del_extra_mem(phys_addr_t start, phys_addr_t size)
{
	int i;
	phys_addr_t start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start;
		size_r = xen_extra_mem[i].size;

		/* Start of region. */
		if (start_r == start) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].start += size;
			xen_extra_mem[i].size -= size;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start + size) {
			BUG_ON(size > size_r);
			xen_extra_mem[i].size -= size;
			break;
		}
		/* Middle of region. */
		if (start > start_r && start < start_r + size_r) {
			BUG_ON(start + size > start_r + size_r);
			xen_extra_mem[i].size = start - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start + size, start_r + size_r -
					  (start + size));
			break;
		}
	}
	memblock_free(start, size);
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;
	phys_addr_t addr = PFN_PHYS(pfn);

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (addr >= xen_extra_mem[i].start &&
		    addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].size)
			continue;
		pfn_s = PFN_DOWN(xen_extra_mem[i].start);
		pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820entry *entry = xen_e820_map;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn < *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

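/*
 * Hand a single page (mfn) back to the hypervisor. The hypercall returns
 * the number of extents released, i.e. 1 on success.
 */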
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
			unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if (pfn >= PFN_UP(__pa(high_memory - 1)))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}

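/*
 * Identity-map all non-RAM regions of the E820 map and prepare remapping
 * the RAM underneath them to pfns at or above the current allocation
 * limit (nr_pages).
 */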
static void __init xen_set_identity_and_remap(unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long last_pfn = nr_pages;
	const struct e820entry *entry = xen_e820_map;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * remap the memory in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == xen_e820_map_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				last_pfn = xen_set_identity_and_remap_chunk(
						start_pfn, end_pfn, nr_pages,
						last_pfn);
			start = end;
		}
	}

	pr_info("Released %ld page(s)\n", xen_released_pages);
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn is remapped to which pfn) is contained
 * in the memory to be remapped itself, in a linked list anchored at
 * xen_remap_mfn.
 * This scheme allows the chunks to be remapped in arbitrary order while
 * the resulting mapping is independent of that order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		mfn = xen_remap_mfn;
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}

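/*
 * Upper bound on the number of pages usable by the domain: 64GB worth of
 * pages on 32-bit, MAXMEM on 64-bit, optionally capped at 512GB for domUs
 * via xen_512gb_limit.
 */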
static unsigned long __init xen_get_pages_limit(void)
{
	unsigned long limit;

#ifdef CONFIG_X86_32
	limit = GB(64) / PAGE_SIZE;
#else
	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;
#endif
	return limit;
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;
	int ret;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}

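/*
 * Add a region to the kernel e820 map, trimming RAM regions inward to
 * page boundaries so no partial page is reported as RAM.
 */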
static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}

static void __init xen_ignore_unusable(void)
{
	struct e820entry *entry = xen_e820_map;
	unsigned int i;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}

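/*
 * Count the pages below max_pfn that are covered by E820 holes or non-RAM
 * regions; the RAM backing those ranges will be remapped, so the memory
 * map may grow by this amount.
 */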
static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
{
	unsigned long extra = 0;
	unsigned long start_pfn, end_pfn;
	const struct e820entry *entry = xen_e820_map;
	int i;

	end_pfn = 0;
	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		start_pfn = PFN_DOWN(entry->addr);
		/* Handle adjacent regions on non-page boundaries. */
		end_pfn = min(end_pfn, start_pfn);

		if (start_pfn >= max_pfn)
			return extra + max_pfn - end_pfn;

		/* Add any holes in map to result. */
		extra += start_pfn - end_pfn;

		end_pfn = PFN_UP(entry->addr + entry->size);
		end_pfn = min(end_pfn, max_pfn);

		if (entry->type != E820_RAM)
			extra += end_pfn - start_pfn;
	}

	return extra;
}

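/*
 * Return true unless the whole range [start, start + size) is covered by
 * a single RAM entry of the E820 map.
 */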
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++) {
		if (entry->type == E820_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

/*
 * Find a free area in physical memory which is not yet reserved and is
 * compliant with the E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list
 * which conflict with the E820 map to be used.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area, which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820entry *entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) {
		if (entry->type != E820_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

/*
 * Like memcpy, but with physical addresses for dest and src.
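 * Copying is done in chunks bounded by the early fixmap capacity
 * (NR_FIX_BTMAPS pages per early_memremap() call).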
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}

/*
 * Reserve Xen mfn_list.
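 * The list is either a virtually mapped linear p2m list (mfn_list above
 * __START_KERNEL_map) or a set of p2m frames (first_p2m_pfn and
 * nr_p2m_frames). On a conflict with the target E820 map the p2m list is
 * relocated (64-bit) or boot is aborted (32-bit).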
Juergen Gross8f5b0c62015-07-17 06:51:25 +0200711 */
712static void __init xen_reserve_xen_mfnlist(void)
713{
Juergen Gross70e61192015-07-17 06:51:35 +0200714 phys_addr_t start, size;
715
Juergen Gross8f5b0c62015-07-17 06:51:25 +0200716 if (xen_start_info->mfn_list >= __START_KERNEL_map) {
Juergen Gross70e61192015-07-17 06:51:35 +0200717 start = __pa(xen_start_info->mfn_list);
718 size = PFN_ALIGN(xen_start_info->nr_pages *
719 sizeof(unsigned long));
720 } else {
721 start = PFN_PHYS(xen_start_info->first_p2m_pfn);
722 size = PFN_PHYS(xen_start_info->nr_p2m_frames);
723 }
724
725 if (!xen_is_e820_reserved(start, size)) {
726 memblock_reserve(start, size);
Juergen Gross8f5b0c62015-07-17 06:51:25 +0200727 return;
728 }
729
Juergen Gross70e61192015-07-17 06:51:35 +0200730#ifdef CONFIG_X86_32
731 /*
732 * Relocating the p2m on 32 bit system to an arbitrary virtual address
733 * is not supported, so just give up.
734 */
735 xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
736 BUG();
737#else
738 xen_relocate_p2m();
739#endif
Juergen Gross8f5b0c62015-07-17 06:51:25 +0200740}
741
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	xen_parse_512gb();
	max_pfn = xen_get_pages_limit();
	max_pfn = min(max_pfn, xen_start_info->nr_pages);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_map[0].addr = 0ULL;
		xen_e820_map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_map[0].size += 8ULL << 20;
		xen_e820_map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_map_entries = memmap.nr_entries;

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(xen_e820_map, xen_e820_map_entries,
			  &xen_e820_map_entries);

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_count_remap_pages(max_pfn);

	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size. On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			   extra_pages, max_pages - max_pfn);
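	/*
	 * Rebuild the kernel's e820 from the Xen map: RAM below mem_end is
	 * kept, RAM above it becomes extra memory (up to extra_pages) or is
	 * marked unusable.
	 */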
	i = 0;
	addr = xen_e820_map[0].addr;
	size = xen_e820_map[0].size;
	while (i < xen_e820_map_entries) {
		chunk_size = size;
		type = xen_e820_map[i].type;

		if (type == E820_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				extra_pages -= PFN_DOWN(chunk_size);
				xen_add_extra_mem(addr, chunk_size);
				xen_max_p2m_pfn = PFN_DOWN(addr + chunk_size);
			} else
				type = E820_UNUSABLE;
		}

		xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_map_entries) {
				addr = xen_e820_map[i].addr;
				size = xen_e820_map[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
				 __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_set_identity_and_remap(max_pfn);

	return "Xen";
}

/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	xen_e820_map_entries = memmap.nr_entries;

	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
			  &xen_e820_map_entries);

	for (i = 0; i < xen_e820_map_entries; i++)
		e820_add_region(xen_e820_map[i].addr, xen_e820_map[i].size,
				xen_e820_map[i].type);

	/* Remove p2m info, it is not needed. */
	xen_start_info->mfn_list = 0;
	xen_start_info->first_p2m_pfn = 0;
	xen_start_info->nr_p2m_frames = 0;

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	/*
	 * This could be called before selected_vdso32 is initialized, so
	 * just fiddle with both possible images. vdso_image_32_syscall
	 * can't be selected, since it only exists on 64-bit systems.
	 */
	u32 *mask;
	mask = vdso_image_32_int80.data +
		vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
	mask = vdso_image_32_sysenter.data +
		vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

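/* Register a Xen upcall of the given type, entered with events masked. */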
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

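/*
 * Register the sysenter entry point with Xen; on failure clear the CPU
 * feature bit so the sysenter path is never used.
 */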
void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

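/*
 * PV-MMU specific setup: enable the required VM assists and register the
 * event and failsafe callbacks.
 */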
void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}