/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "vdso.h"
#include "mmu.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long	next_area_mfn;
	unsigned long	target_pfn;
	unsigned long	size;
	unsigned long	mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
 * The maximum amount of extra memory compared to the base size. The
 * main scaling factor is the size of struct page. At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

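/*
 * Parse the boot parameter "xen_512gb_limit" from the Xen-provided command
 * line. The bare option enables the limit, "xen_512gb_limit=<bool>" sets it
 * explicitly; a malformed value leaves the compile-time default in place.
 */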
static void __init xen_parse_512gb(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
		return;

	xen_512gb_limit = val;
}

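/*
 * Record [start_pfn, start_pfn + n_pfns) as extra memory: append it to an
 * adjacent existing region if possible, otherwise take a free slot in
 * xen_extra_mem[]. The range is also reserved in memblock.
 */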
static void __init xen_add_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;

	/*
	 * No need to check for a zero size; that should happen rarely, and
	 * would only write a new entry that is still regarded as unused due
	 * to its zero size.
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].n_pfns == 0) {
			xen_extra_mem[i].start_pfn = start_pfn;
			xen_extra_mem[i].n_pfns = n_pfns;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
		    start_pfn) {
			xen_extra_mem[i].n_pfns += n_pfns;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

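/*
 * Remove [start_pfn, start_pfn + n_pfns) from the extra memory regions.
 * The range may sit at the start, at the end or in the middle of a region;
 * in the middle case the region is split in two. The pages are handed back
 * to memblock.
 */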
static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Middle of region. */
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}
	memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (pfn >= xen_extra_mem[i].start_pfn &&
		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].n_pfns)
			continue;
		pfn_s = xen_extra_mem[i].start_pfn;
		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_TYPE_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 regions ending after min_pfn */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

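/*
 * Hand a single frame back to the hypervisor via
 * XENMEM_decrease_reservation; callers treat a return value of 1 (one
 * extent released) as success.
 */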
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
			unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if (pfn >= PFN_UP(__pa(high_memory - 1)))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	if (remap_pfn == 0)
		remap_pfn = nr_pages;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}

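/*
 * Callback for xen_foreach_remap_area(): count how many pages of the
 * given chunk lie below the initial allocation (nr_pages) and thus need
 * remapping; the running total is passed in and returned via remap_pages.
 */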
static unsigned long __init xen_count_remap_pages(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pages)
{
	if (start_pfn >= nr_pages)
		return remap_pages;

	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
}

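/*
 * Walk the E820 map, combining adjacent non-RAM regions and gaps, and
 * invoke func() on each combined range; used both to count the pages to
 * remap and to do the actual identity mapping and remap preparation.
 */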
static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long nr_pages, unsigned long last_val))
{
	phys_addr_t start = 0;
	unsigned long ret_val = 0;
	const struct e820_entry *entry = xen_e820_table.entries;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then call the provided function
	 * to perform its duty on the non-RAM region.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_TYPE_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				ret_val = func(start_pfn, end_pfn, nr_pages,
					       ret_val);
			start = end;
		}
	}

	return ret_val;
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn to remap to which pfn) is contained in
 * the to be remapped memory itself in a linked list anchored at
 * xen_remap_mfn. This scheme allows the different chunks to be remapped in
 * arbitrary order while the resulting mapping stays independent of that
 * order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(pfn_s, len);
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		mfn = xen_remap_mfn;
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(pfn_s, len);

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}

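/*
 * Upper bound on the number of pages usable by this domain: 64GB worth on
 * 32 bit, otherwise MAXMEM, capped at 512GB for domUs when xen_512gb_limit
 * is in effect.
 */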
static unsigned long __init xen_get_pages_limit(void)
{
	unsigned long limit;

#ifdef CONFIG_X86_32
	limit = GB(64) / PAGE_SIZE;
#else
	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;
#endif
	return limit;
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;
	long ret;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}

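/*
 * Add a region to the e820 map, shrinking RAM regions to whole pages so
 * that no partial RAM page is reported.
 */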
static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_TYPE_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
	}

	e820__range_add(start, end - start, type);
}

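/* Turn all E820_TYPE_UNUSABLE entries of the Xen-supplied map into RAM. */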
static void __init xen_ignore_unusable(void)
{
	struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		if (entry->type == E820_TYPE_UNUSABLE)
			entry->type = E820_TYPE_RAM;
	}
}

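/*
 * Return false only if [start, start + size) is completely covered by a
 * single RAM entry of the E820 map; any other non-empty range counts as
 * reserved.
 */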
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820_entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		if (entry->type == E820_TYPE_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

/*
 * Find a free area of physical memory that is not yet reserved and is
 * consistent with the E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list
 * which conflict with the E820 map that will be used.
 * In case no area is found, return 0. Otherwise return the physical
 * address of the area, which is already reserved for the caller's
 * convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820_entry *entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
		if (entry->type != E820_TYPE_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}

/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	memblock_reserve(start, size);
	if (!xen_is_e820_reserved(start, size))
		return;

#ifdef CONFIG_X86_32
	/*
	 * Relocating the p2m on a 32-bit system to an arbitrary virtual
	 * address is not supported, so just give up.
	 */
	xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
	BUG();
#else
	xen_relocate_p2m();
	memblock_free(start, size);
#endif
}

/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn, pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	xen_parse_512gb();
	max_pfn = xen_get_pages_limit();
	max_pfn = min(max_pfn, xen_start_info->nr_pages);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_table.entries[0].addr = 0ULL;
		xen_e820_table.entries[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_table.entries[0].size += 8ULL << 20;
		xen_e820_table.entries[0].type = E820_TYPE_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_table.nr_entries = memmap.nr_entries;

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	e820__update_table(&xen_e820_table);

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);

	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
	 * factor of the base size.  On non-highmem systems, the base
	 * size is the full initial memory allocation; on highmem it
	 * is limited to the max size of lowmem, so that it doesn't
	 * get completely filled.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			   extra_pages, max_pages - max_pfn);
	i = 0;
	addr = xen_e820_table.entries[0].addr;
	size = xen_e820_table.entries[0].size;
	while (i < xen_e820_table.nr_entries) {
		bool discard = false;

		chunk_size = size;
		type = xen_e820_table.entries[i].type;

		if (type == E820_TYPE_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				pfn_s = PFN_UP(addr);
				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
				extra_pages -= n_pfns;
				xen_add_extra_mem(pfn_s, n_pfns);
				xen_max_p2m_pfn = pfn_s + n_pfns;
			} else
				discard = true;
		}

		if (!discard)
			xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_table.nr_entries) {
				addr = xen_e820_table.entries[i].addr;
				size = xen_e820_table.entries[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_TYPE_RESERVED);

	e820__update_table(e820_table);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
			__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);

	pr_info("Released %ld page(s)\n", xen_released_pages);

	return "Xen";
}

/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	xen_e820_table.nr_entries = memmap.nr_entries;

	e820__update_table(&xen_e820_table);

	for (i = 0; i < xen_e820_table.nr_entries; i++)
		e820__range_add(xen_e820_table.entries[i].addr, xen_e820_table.entries[i].size, xen_e820_table.entries[i].type);

	/* Remove p2m info, it is not needed. */
	xen_start_info->mfn_list = 0;
	xen_start_info->first_p2m_pfn = 0;
	xen_start_info->nr_p2m_frames = 0;

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask = vdso_image_32.data +
		vdso_image_32.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

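/*
 * Register an entry point with the hypervisor, with events masked on
 * entry (CALLBACKF_mask_events); callers treat a non-zero return as
 * failure.
 */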
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

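/*
 * Register the sysenter entry point with Xen if the CPU supports it;
 * if registration fails, clear the corresponding CPU feature bit.
 */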
void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

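/*
 * Paravirt MMU setup: enable the VM assists the PV kernel relies on and
 * register the event, failsafe, sysenter and syscall entry points.
 */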
void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}