#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/swapfile.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <linux/sched/task.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>            /* for MAX_DMA_PFN */
#include <asm/microcode.h>
#include <asm/kaslr.h>
#include <asm/hypervisor.h>
#include <asm/cpufeature.h>
#include <asm/pti.h>
#include <asm/text-patching.h>
#include <asm/memtype.h>

/*
 * We need to define the tracepoints somewhere, and tlb.c
 * is only compiled when SMP=y.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>

#include "mm_internal.h"

/*
 * Tables translating between page_cache_type_t and pte encoding.
 *
 * The default values are defined statically as minimal supported mode;
 * WC and WT fall back to UC-.  pat_init() updates these values to support
 * more cache modes, WC and WT, when it is safe to do so.  See pat_init()
 * for the details.  Note, __early_ioremap() used during early boot-time
 * takes pgprot_t (pte encoding) and does not use these tables.
 *
 *   Index into __cachemode2pte_tbl[] is the cachemode.
 *
 *   Index into __pte2cachemode_tbl[] are the caching attribute bits of the pte
 *   (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
 */
static uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
        [_PAGE_CACHE_MODE_WB      ]     = 0         | 0        ,
        [_PAGE_CACHE_MODE_WC      ]     = 0         | _PAGE_PCD,
        [_PAGE_CACHE_MODE_UC_MINUS]     = 0         | _PAGE_PCD,
        [_PAGE_CACHE_MODE_UC      ]     = _PAGE_PWT | _PAGE_PCD,
        [_PAGE_CACHE_MODE_WT      ]     = 0         | _PAGE_PCD,
        [_PAGE_CACHE_MODE_WP      ]     = 0         | _PAGE_PCD,
};

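/*
 * Translate a page_cache_mode into the PTE cache bits (_PAGE_PWT/_PAGE_PCD/
 * _PAGE_PAT, per the table above). WB is encoded as 0, so the common case
 * needs no table lookup.
 */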
unsigned long cachemode2protval(enum page_cache_mode pcm)
{
        if (likely(pcm == 0))
                return 0;
        return __cachemode2pte_tbl[pcm];
}
EXPORT_SYMBOL(cachemode2protval);

static uint8_t __pte2cachemode_tbl[8] = {
        [__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
        [__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
        [__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
        [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
        [__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
        [__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
        [__pte2cm_idx(0         | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
        [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};

/* Check that the write-protect PAT entry is set for write-protect */
bool x86_has_pat_wp(void)
{
        return __pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] == _PAGE_CACHE_MODE_WP;
}

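/*
 * Translate the cache bits of a pgprot back into a page_cache_mode, using
 * the reverse table above.
 */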
enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
        unsigned long masked;

        masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
        if (likely(masked == 0))
                return 0;
        return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}

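/*
 * Early page-table allocation state: pgt_buf_start/end/top delimit (in pfns)
 * the brk-reserved buffer that alloc_low_pages() consumes; when it is
 * exhausted (or must not be used), alloc_low_pages() falls back to memblock.
 * See early_alloc_pgt_buf() below.
 */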
static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;

/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */
__ref void *alloc_low_pages(unsigned int num)
{
        unsigned long pfn;
        int i;

        if (after_bootmem) {
                unsigned int order;

                order = get_order((unsigned long)num << PAGE_SHIFT);
                return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
        }

        if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
                unsigned long ret = 0;

                if (min_pfn_mapped < max_pfn_mapped) {
                        ret = memblock_phys_alloc_range(
                                        PAGE_SIZE * num, PAGE_SIZE,
                                        min_pfn_mapped << PAGE_SHIFT,
                                        max_pfn_mapped << PAGE_SHIFT);
                }
                if (!ret && can_use_brk_pgt)
                        ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));

                if (!ret)
                        panic("alloc_low_pages: can not alloc memory");

                pfn = ret >> PAGE_SHIFT;
        } else {
                pfn = pgt_buf_end;
                pgt_buf_end += num;
        }

        for (i = 0; i < num; i++) {
                void *adr;

                adr = __va((pfn + i) << PAGE_SHIFT);
                clear_page(adr);
        }

        return __va(pfn << PAGE_SHIFT);
}

/*
 * By default we need to be able to allocate page tables below the PGD, first
 * for the 0-ISA_END_ADDRESS range and second for the initial PMD_SIZE mapping.
 * With KASLR memory randomization enabled, depending on the machine's e820
 * memory and the PUD alignment, twice that many pages may be needed.
 */

#ifndef CONFIG_X86_5LEVEL
#define INIT_PGD_PAGE_TABLES    3
#else
#define INIT_PGD_PAGE_TABLES    4
#endif

#ifndef CONFIG_RANDOMIZE_MEMORY
#define INIT_PGD_PAGE_COUNT     (2 * INIT_PGD_PAGE_TABLES)
#else
#define INIT_PGD_PAGE_COUNT     (4 * INIT_PGD_PAGE_TABLES)
#endif

#define INIT_PGT_BUF_SIZE       (INIT_PGD_PAGE_COUNT * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
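/*
 * Reserve INIT_PGT_BUF_SIZE bytes of brk space for early page tables and
 * initialize the pgt_buf_* cursors used by alloc_low_pages().
 */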
void __init early_alloc_pgt_buf(void)
{
        unsigned long tables = INIT_PGT_BUF_SIZE;
        phys_addr_t base;

        base = __pa(extend_brk(tables, PAGE_SIZE));

        pgt_buf_start = base >> PAGE_SHIFT;
        pgt_buf_end = pgt_buf_start;
        pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

int after_bootmem;

early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);

struct map_range {
        unsigned long start;
        unsigned long end;
        unsigned page_size_mask;
};

static int page_size_mask;

/*
 * Save some of cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. Invoked on the boot CPU.
 */
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
        mmu_cr4_features |= mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4_set_bits(mask);
}

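/*
 * Decide which large page sizes (2M and/or 1G) the direct mapping may use,
 * based on CPU features, debug_pagealloc and the "gbpages"/"nogbpages"
 * options, enable PSE/PGE in CR4 when available, and compute the default
 * kernel PTE mask (mostly non-Global when PTI is enabled).
 */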
static void __init probe_page_size_mask(void)
{
        /*
         * For pagealloc debugging, identity mapping will use small pages.
         * This will simplify cpa(), which otherwise needs to support splitting
         * large pages into small in interrupt context, etc.
         */
        if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
                page_size_mask |= 1 << PG_LEVEL_2M;
        else
                direct_gbpages = 0;

        /* Enable PSE if available */
        if (boot_cpu_has(X86_FEATURE_PSE))
                cr4_set_bits_and_update_boot(X86_CR4_PSE);

        /* Enable PGE if available */
        __supported_pte_mask &= ~_PAGE_GLOBAL;
        if (boot_cpu_has(X86_FEATURE_PGE)) {
                cr4_set_bits_and_update_boot(X86_CR4_PGE);
                __supported_pte_mask |= _PAGE_GLOBAL;
        }

        /* By default, everything is supported: */
        __default_kernel_pte_mask = __supported_pte_mask;
        /* Except with PTI, where the kernel is mostly non-Global: */
        if (cpu_feature_enabled(X86_FEATURE_PTI))
                __default_kernel_pte_mask &= ~_PAGE_GLOBAL;

        /* Enable 1 GB linear kernel mappings if available: */
        if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
                printk(KERN_INFO "Using GB pages for direct mapping\n");
                page_size_mask |= 1 << PG_LEVEL_1G;
        } else {
                direct_gbpages = 0;
        }
}

static void setup_pcid(void)
{
        if (!IS_ENABLED(CONFIG_X86_64))
                return;

        if (!boot_cpu_has(X86_FEATURE_PCID))
                return;

        if (boot_cpu_has(X86_FEATURE_PGE)) {
                /*
                 * This can't be cr4_set_bits_and_update_boot() -- the
                 * trampoline code can't handle CR4.PCIDE and it wouldn't
                 * do any good anyway.  Despite the name,
                 * cr4_set_bits_and_update_boot() doesn't actually cause
                 * the bits in question to remain set all the way through
                 * the secondary boot asm.
                 *
                 * Instead, we brute-force it and set CR4.PCIDE manually in
                 * start_secondary().
                 */
                cr4_set_bits(X86_CR4_PCIDE);

                /*
                 * INVPCID's single-context modes (2/3) only work if we set
                 * X86_CR4_PCIDE, *and* we have INVPCID support.  It's unusable
                 * on systems that have X86_CR4_PCIDE clear, or that have
                 * no INVPCID support at all.
                 */
                if (boot_cpu_has(X86_FEATURE_INVPCID))
                        setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
        } else {
                /*
                 * flush_tlb_all(), as currently implemented, won't work if
                 * PCID is on but PGE is not.  Since that combination
                 * doesn't exist on real hardware, there's no reason to try
                 * to fully support it, but it's polite to avoid corrupting
                 * data if we're on an improperly configured VM.
                 */
                setup_clear_cpu_cap(X86_FEATURE_PCID);
        }
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

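/*
 * Record one [start_pfn, end_pfn) range and its page_size_mask in mr[],
 * skipping empty ranges. Panics if more than NR_RANGE_MR ranges are needed.
 */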
static int __meminit save_mr(struct map_range *mr, int nr_range,
                             unsigned long start_pfn, unsigned long end_pfn,
                             unsigned long page_size_mask)
{
        if (start_pfn < end_pfn) {
                if (nr_range >= NR_RANGE_MR)
                        panic("run out of range for init_memory_mapping\n");
                mr[nr_range].start = start_pfn<<PAGE_SHIFT;
                mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
                mr[nr_range].page_size_mask = page_size_mask;
                nr_range++;
        }

        return nr_range;
}

/*
 * Adjust the page_size_mask for small ranges to use big pages
 * if the nearby memory is RAM too.
 */
static void __ref adjust_range_page_size_mask(struct map_range *mr,
                                              int nr_range)
{
        int i;

        for (i = 0; i < nr_range; i++) {
                if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
                    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
                        unsigned long start = round_down(mr[i].start, PMD_SIZE);
                        unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
                        if ((end >> PAGE_SHIFT) > max_low_pfn)
                                continue;
#endif

                        if (memblock_is_region_memory(start, end - start))
                                mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
                }
                if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
                    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
                        unsigned long start = round_down(mr[i].start, PUD_SIZE);
                        unsigned long end = round_up(mr[i].end, PUD_SIZE);

                        if (memblock_is_region_memory(start, end - start))
                                mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
                }
        }
}

static const char *page_size_string(struct map_range *mr)
{
        static const char str_1g[] = "1G";
        static const char str_2m[] = "2M";
        static const char str_4m[] = "4M";
        static const char str_4k[] = "4k";

        if (mr->page_size_mask & (1<<PG_LEVEL_1G))
                return str_1g;
        /*
         * 32-bit without PAE has a 4M large page size.
         * PG_LEVEL_2M is misnamed, but we can at least
         * print out the right size in the string.
         */
        if (IS_ENABLED(CONFIG_X86_32) &&
            !IS_ENABLED(CONFIG_X86_PAE) &&
            mr->page_size_mask & (1<<PG_LEVEL_2M))
                return str_4m;

        if (mr->page_size_mask & (1<<PG_LEVEL_2M))
                return str_2m;

        return str_4k;
}

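/*
 * Split [start, end) into ranges that can be mapped with the largest page
 * sizes allowed by page_size_mask, honouring 2M/1G alignment. Roughly, on
 * a 64-bit machine with both 2M and 1G pages enabled, mapping [1MB, 5GB)
 * is split into:
 *
 *   [1MB,  2MB)   4k pages   (head up to the first 2M boundary)
 *   [2MB,  1GB)   2M pages
 *   [1GB,  5GB)   1G pages
 *
 * adjust_range_page_size_mask() may later upgrade the smaller ranges if
 * the surrounding 2M/1G area is entirely RAM.
 */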
static int __meminit split_mem_range(struct map_range *mr, int nr_range,
                                     unsigned long start,
                                     unsigned long end)
{
        unsigned long start_pfn, end_pfn, limit_pfn;
        unsigned long pfn;
        int i;

        limit_pfn = PFN_DOWN(end);

        /* head if not big page alignment ? */
        pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
        /*
         * Don't use a large page for the first 2/4MB of memory
         * because there are often fixed size MTRRs in there
         * and overlapping MTRRs into large pages can cause
         * slowdowns.
         */
        if (pfn == 0)
                end_pfn = PFN_DOWN(PMD_SIZE);
        else
                end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
        end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
        if (end_pfn > limit_pfn)
                end_pfn = limit_pfn;
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
                pfn = end_pfn;
        }

        /* big page (2M) range */
        start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
        end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
        end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
        if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
                end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask & (1<<PG_LEVEL_2M));
                pfn = end_pfn;
        }

#ifdef CONFIG_X86_64
        /* big page (1G) range */
        start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
        end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask &
                                 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
                pfn = end_pfn;
        }

        /* tail is not big page (1G) alignment */
        start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
        end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask & (1<<PG_LEVEL_2M));
                pfn = end_pfn;
        }
#endif

        /* tail is not big page (2M) alignment */
        start_pfn = pfn;
        end_pfn = limit_pfn;
        nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

        if (!after_bootmem)
                adjust_range_page_size_mask(mr, nr_range);

        /* Try to merge contiguous ranges with the same page size. */
        for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
                unsigned long old_start;
                if (mr[i].end != mr[i+1].start ||
                    mr[i].page_size_mask != mr[i+1].page_size_mask)
                        continue;
                /* move it */
                old_start = mr[i].start;
                memmove(&mr[i], &mr[i+1],
                        (nr_range - 1 - i) * sizeof(struct map_range));
                mr[i--].start = old_start;
                nr_range--;
        }

        for (i = 0; i < nr_range; i++)
                pr_debug(" [mem %#010lx-%#010lx] page %s\n",
                                mr[i].start, mr[i].end - 1,
                                page_size_string(&mr[i]));

        return nr_range;
}

struct range pfn_mapped[E820_MAX_ENTRIES];
int nr_pfn_mapped;

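/*
 * pfn_mapped[]/nr_pfn_mapped above track which pfn ranges have been added to
 * the direct mapping. add_pfn_range_mapped() merges a new range in and
 * updates max_pfn_mapped/max_low_pfn_mapped accordingly.
 */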
static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
        nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_MAX_ENTRIES,
                                             nr_pfn_mapped, start_pfn, end_pfn);
        nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_MAX_ENTRIES);

        max_pfn_mapped = max(max_pfn_mapped, end_pfn);

        if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
                max_low_pfn_mapped = max(max_low_pfn_mapped,
                                         min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}

bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
        int i;

        for (i = 0; i < nr_pfn_mapped; i++)
                if ((start_pfn >= pfn_mapped[i].start) &&
                    (end_pfn <= pfn_mapped[i].end))
                        return true;

        return false;
}

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __ref init_memory_mapping(unsigned long start,
                                        unsigned long end, pgprot_t prot)
{
        struct map_range mr[NR_RANGE_MR];
        unsigned long ret = 0;
        int nr_range, i;

        pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
               start, end - 1);

        memset(mr, 0, sizeof(mr));
        nr_range = split_mem_range(mr, 0, start, end);

        for (i = 0; i < nr_range; i++)
                ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
                                                   mr[i].page_size_mask,
                                                   prot);

        add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

        return ret >> PAGE_SHIFT;
}

/*
 * We need to iterate through the E820 memory map and create direct mappings
 * for only E820_TYPE_RAM and E820_KERN_RESERVED regions. We cannot simply
 * create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may possibly result
 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
 *
 * init_mem_mapping() calls init_range_memory_mapping() with a big range.
 * That range may have holes in the middle or at the ends, and only the
 * RAM parts will be mapped in init_range_memory_mapping().
 */
static unsigned long __init init_range_memory_mapping(
                                           unsigned long r_start,
                                           unsigned long r_end)
{
        unsigned long start_pfn, end_pfn;
        unsigned long mapped_ram_size = 0;
        int i;

        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
                u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
                u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
                if (start >= end)
                        continue;

                /*
                 * If the range overlaps the brk page tables, we need to
                 * allocate the pgt buffer from memblock instead.
                 */
                can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
                                    min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
                init_memory_mapping(start, end, PAGE_KERNEL);
                mapped_ram_size += end - start;
                can_use_brk_pgt = true;
        }

        return mapped_ram_size;
}

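/*
 * Grow the chunk size used by memory_map_top_down()/memory_map_bottom_up().
 * E.g. with 4K pages the shift below is PMD_SHIFT - PAGE_SHIFT - 1 == 8, so
 * step_size grows by a factor of 256 per call: 2M -> 512M -> 128G -> ...
 */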
static unsigned long __init get_new_step_size(unsigned long step_size)
{
        /*
         * Initial mapped size is PMD_SIZE (2M).
         * We can not set step_size to be PUD_SIZE (1G) yet.
579 * In worse case, when we cross the 1G boundary, and
         * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
         * to map 1G range with PTE. Hence we use one less than the
         * difference of page table level shifts.
         *
         * Don't need to worry about overflow in the top-down case, on 32bit,
         * when step_size is 0, round_down() returns 0 for start, and that
         * turns it into 0x100000000ULL.
         * In the bottom-up case, round_up(x, 0) returns 0 though too, which
         * needs to be taken into consideration by the code below.
         */
        return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will set up the direct mapping for the memory range
 * [map_start, map_end) in top-down fashion: the page tables will be
 * allocated at the end of the memory, and the memory is mapped
 * top-down.
 */
static void __init memory_map_top_down(unsigned long map_start,
                                       unsigned long map_end)
{
        unsigned long real_end, last_start;
        unsigned long step_size;
        unsigned long addr;
        unsigned long mapped_ram_size = 0;

        /*
         * Systems that have many reserved areas near top of the memory,
         * e.g. QEMU with less than 1G RAM and EFI enabled, or Xen, will
         * require lots of 4K mappings which may exhaust pgt_buf.
         * Start with top-most PMD_SIZE range aligned at PMD_SIZE to ensure
         * there is enough mapped memory that can be allocated from
         * memblock.
         */
        addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
                                         map_end);
        memblock_phys_free(addr, PMD_SIZE);
        real_end = addr + PMD_SIZE;

        /* step_size needs to be small so the pgt_buf from BRK can cover it */
        step_size = PMD_SIZE;
        max_pfn_mapped = 0; /* will get exact value next */
        min_pfn_mapped = real_end >> PAGE_SHIFT;
        last_start = real_end;

        /*
         * We start from the top (end of memory) and go to the bottom.
         * The memblock_find_in_range() gets us a block of RAM from the
         * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
         * for page table.
         */
        while (last_start > map_start) {
                unsigned long start;

                if (last_start > step_size) {
                        start = round_down(last_start - 1, step_size);
                        if (start < map_start)
                                start = map_start;
                } else
                        start = map_start;
                mapped_ram_size += init_range_memory_mapping(start,
                                                        last_start);
                last_start = start;
                min_pfn_mapped = last_start >> PAGE_SHIFT;
                if (mapped_ram_size >= step_size)
                        step_size = get_new_step_size(step_size);
        }

        if (real_end < map_end)
                init_range_memory_mapping(real_end, map_end);
}

/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will set up the direct mapping for the memory range
 * [map_start, map_end) in bottom-up fashion. Since we have limited the
 * bottom-up allocation to above the kernel, the page tables will be
 * allocated just above the kernel and the memory in [map_start, map_end)
 * is mapped bottom-up.
 */
static void __init memory_map_bottom_up(unsigned long map_start,
                                        unsigned long map_end)
{
        unsigned long next, start;
        unsigned long mapped_ram_size = 0;
673 /* step_size need to be small so pgt_buf from BRK could cover it */
        unsigned long step_size = PMD_SIZE;

        start = map_start;
        min_pfn_mapped = start >> PAGE_SHIFT;

        /*
         * We start from the bottom (@map_start) and go to the top (@map_end).
         * The memblock_find_in_range() gets us a block of RAM from the
         * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
         * for page table.
         */
        while (start < map_end) {
                if (step_size && map_end - start > step_size) {
                        next = round_up(start + 1, step_size);
                        if (next > map_end)
                                next = map_end;
                } else {
                        next = map_end;
                }

                mapped_ram_size += init_range_memory_mapping(start, next);
                start = next;

                if (mapped_ram_size >= step_size)
                        step_size = get_new_step_size(step_size);
        }
}

/*
 * The real mode trampoline, which is required for bootstrapping CPUs
 * occupies only a small area under the low 1MB. See reserve_real_mode()
 * for details.
 *
 * If KASLR is disabled the first PGD entry of the direct mapping is copied
 * to map the real mode trampoline.
 *
 * If KASLR is enabled, copy only the PUD which covers the low 1MB
 * area. This limits the randomization granularity to 1GB for both 4-level
 * and 5-level paging.
 */
static void __init init_trampoline(void)
{
#ifdef CONFIG_X86_64
        /*
         * The code below will alias kernel page-tables in the user-range of the
         * address space, including the Global bit. So global TLB entries will
         * be created when using the trampoline page-table.
         */
        if (!kaslr_memory_enabled())
                trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
        else
                init_trampoline_kaslr();
#endif
}

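/*
 * Top-level setup of the kernel direct mapping: probe usable page sizes,
 * map the low ISA range and the real-mode trampoline, map the rest of RAM
 * either bottom-up or top-down depending on the memblock allocation
 * direction, and finally switch to swapper_pg_dir.
 */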
void __init init_mem_mapping(void)
{
        unsigned long end;

        pti_check_boottime_disable();
        probe_page_size_mask();
        setup_pcid();

#ifdef CONFIG_X86_64
        end = max_pfn << PAGE_SHIFT;
#else
        end = max_low_pfn << PAGE_SHIFT;
#endif

        /* the ISA range is always mapped regardless of memory holes */
        init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);

        /* Init the trampoline, possibly with KASLR memory offset */
        init_trampoline();

        /*
         * If the allocation is in bottom-up direction, we setup direct mapping
         * in bottom-up, otherwise we setup direct mapping in top-down.
         */
        if (memblock_bottom_up()) {
                unsigned long kernel_end = __pa_symbol(_end);

                /*
                 * we need two separate calls here. This is because we want to
                 * allocate page tables above the kernel. So we first map
                 * [kernel_end, end) to make memory above the kernel be mapped
                 * as soon as possible. And then use page tables allocated above
                 * the kernel to map [ISA_END_ADDRESS, kernel_end).
                 */
                memory_map_bottom_up(kernel_end, end);
                memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
        } else {
                memory_map_top_down(ISA_END_ADDRESS, end);
        }

#ifdef CONFIG_X86_64
        if (max_pfn > max_low_pfn) {
                /* can we preserve max_low_pfn? */
                max_low_pfn = max_pfn;
        }
#else
        early_ioremap_page_table_range_init();
#endif

        load_cr3(swapper_pg_dir);
        __flush_tlb_all();

        x86_init.hyper.init_mem_mapping();

        early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}

/*
 * Initialize an mm_struct to be used during poking and a pointer to be used
 * during patching.
 */
void __init poking_init(void)
{
        spinlock_t *ptl;
        pte_t *ptep;

        poking_mm = copy_init_mm();
        BUG_ON(!poking_mm);

        /*
         * Randomize the poking address, but make sure that the following page
         * will be mapped at the same PMD. We need 2 pages, so find space for 3,
         * and adjust the address if the PMD ends after the first one.
         */
        poking_addr = TASK_UNMAPPED_BASE;
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
                poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
                        (TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);

        if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
                poking_addr += PAGE_SIZE;

        /*
         * We need to trigger the allocation of the page-tables that will be
         * needed for poking now. Later, poking may be performed in an atomic
         * section, which might cause allocation to fail.
         */
        ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
        BUG_ON(!ptep);
        pte_unmap_unlock(ptep, ptl);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area traditionally contains BIOS code and data regions used by X, dosemu,
 * and similar apps. Since they map the entire memory range, the whole range
 * must be allowed (for mapping), but any areas that would otherwise be
 * disallowed are flagged as being "zero filled" instead of rejected.
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
                                IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
                        != REGION_DISJOINT) {
                /*
                 * For disallowed memory regions in the low 1MB range,
                 * request that the page be shown as all zeros.
                 */
                if (pagenr < 256)
                        return 2;

                return 0;
        }

        /*
         * This must follow RAM test, since System RAM is considered a
         * restricted resource under CONFIG_STRICT_IOMEM.
         */
        if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
                /* Low 1MB bypasses iomem restrictions. */
                if (pagenr < 256)
                        return 1;

                return 0;
        }

        return 1;
}

void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
        unsigned long begin_aligned, end_aligned;

        /* Make sure boundaries are page aligned */
        begin_aligned = PAGE_ALIGN(begin);
        end_aligned   = end & PAGE_MASK;

        if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
                begin = begin_aligned;
                end   = end_aligned;
        }

        if (begin >= end)
                return;

        /*
         * If debugging page accesses then do not free this memory but
         * mark them not present - any buggy init-section access will
         * create a kernel page fault:
         */
        if (debug_pagealloc_enabled()) {
                pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
                        begin, end - 1);
                /*
                 * Inform kmemleak about the hole in the memory since the
                 * corresponding pages will be unmapped.
                 */
                kmemleak_free_part((void *)begin, end - begin);
                set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
        } else {
                /*
                 * We just marked the kernel text read only above, now that
                 * we are going to free part of that, we need to make that
                 * writeable and non-executable first.
                 */
                set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
                set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

                free_reserved_area((void *)begin, (void *)end,
                                   POISON_FREE_INITMEM, what);
        }
}

/*
 * begin/end can be in the direct map or the "high kernel mapping"
 * used for the kernel image only.  free_init_pages() will do the
 * right thing for either kind of address.
 */
void free_kernel_image_pages(const char *what, void *begin, void *end)
{
        unsigned long begin_ul = (unsigned long)begin;
        unsigned long end_ul = (unsigned long)end;
        unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;

        free_init_pages(what, begin_ul, end_ul);

        /*
         * PTI maps some of the kernel into userspace.  For performance,
         * this includes some kernel areas that do not contain secrets.
         * Those areas might be adjacent to the parts of the kernel image
         * being freed, which may contain secrets.  Remove the "high kernel
         * image mapping" for these freed areas, ensuring they are not even
         * potentially vulnerable to Meltdown regardless of the specific
         * optimizations PTI is currently using.
         *
         * The "noalias" prevents unmapping the direct map alias which is
         * needed to access the freed pages.
         *
         * This is only valid for 64bit kernels. 32bit has only one mapping
         * which can't be treated in this way for obvious reasons.
         */
        if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
                set_memory_np_noalias(begin_ul, len_pages);
}

void __ref free_initmem(void)
{
        e820__reallocate_tables();

        mem_encrypt_free_decrypted_mem();

        free_kernel_image_pages("unused kernel image (initmem)",
                                &__init_begin, &__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
        /*
         * end could be unaligned, and we cannot align it because the
         * decompressor could be confused by an aligned initrd_end.
         * We already reserved the trailing partial page earlier in:
         *   - i386_start_kernel()
         *   - x86_64_start_kernel()
         *   - relocate_initrd()
         * so here we can safely PAGE_ALIGN() and free the partial page too.
         */
        free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif

/*
 * Calculate the precise size of the DMA zone (first 16 MB of RAM),
 * and pass it to the MM layer - to help it set zone watermarks more
 * accurately.
 *
 * Done on 64-bit systems only for the time being, although 32-bit systems
 * might benefit from this as well.
 */
void __init memblock_find_dma_reserve(void)
{
#ifdef CONFIG_X86_64
        u64 nr_pages = 0, nr_free_pages = 0;
        unsigned long start_pfn, end_pfn;
        phys_addr_t start_addr, end_addr;
        int i;
        u64 u;

        /*
         * Iterate over all memory ranges (free and reserved ones alike),
         * to calculate the total number of pages in the first 16 MB of RAM:
         */
        nr_pages = 0;
        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
                start_pfn = min(start_pfn, MAX_DMA_PFN);
                end_pfn   = min(end_pfn,   MAX_DMA_PFN);

                nr_pages += end_pfn - start_pfn;
        }

        /*
         * Iterate over free memory ranges to calculate the number of free
         * pages in the DMA zone, while not counting potential partial
         * pages at the beginning or the end of the range:
         */
        nr_free_pages = 0;
        for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
                start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
                end_pfn   = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN);

                if (start_pfn < end_pfn)
                        nr_free_pages += end_pfn - start_pfn;
        }

        set_dma_reserve(nr_pages - nr_free_pages);
#endif
}

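/*
 * Report the highest pfn of each memory zone (DMA, DMA32, NORMAL, HIGHMEM)
 * to the core MM via free_area_init().
 */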
void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA]         = min(MAX_DMA_PFN, max_low_pfn);
#endif
#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32]       = min(MAX_DMA32_PFN, max_low_pfn);
#endif
        max_zone_pfns[ZONE_NORMAL]      = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM]     = max_pfn;
#endif

        free_area_init(max_zone_pfns);
}

__visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = {
        .loaded_mm = &init_mm,
        .next_asid = 1,
        .cr4 = ~0UL,    /* fail hard if we screw up cr4 shadow initialization */
};

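/*
 * Update both cachemode translation tables for one PAT entry; this is how
 * pat_init() installs the boot-time cache modes described in the comment
 * above the tables near the top of this file. Entry 0 must remain WB.
 */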
void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
        /* entry 0 MUST be WB (hardwired to speed up translations) */
        BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);

        __cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
        __pte2cachemode_tbl[entry] = cache;
}

#ifdef CONFIG_SWAP
unsigned long max_swapfile_size(void)
{
        unsigned long pages;

        pages = generic_max_swapfile_size();

        if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
                /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
                unsigned long long l1tf_limit = l1tf_pfn_limit();
                /*
                 * We encode swap offsets also with 3 bits below those for pfn
                 * which makes the usable limit higher.
                 */
#if CONFIG_PGTABLE_LEVELS > 2
                l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
#endif
                pages = min_t(unsigned long long, l1tf_limit, pages);
        }
        return pages;
}
#endif