Tejun Heo5a0e3ad2010-03-24 17:04:11 +09001#include <linux/gfp.h>
Jaswinder Singh Rajput2c1b2842009-04-11 00:03:10 +05302#include <linux/initrd.h>
Pekka Enberg540aca02009-03-04 11:46:40 +02003#include <linux/ioport.h>
Pekka Enberge5b2bb52009-03-03 13:15:06 +02004#include <linux/swap.h>
Yinghai Lua9ce6bc2010-08-25 13:39:17 -07005#include <linux/memblock.h>
Andi Kleen377eeaa2018-06-13 15:48:28 -07006#include <linux/swapfile.h>
7#include <linux/swapops.h>
Qian Cai0d021132019-04-23 12:58:11 -04008#include <linux/kmemleak.h>
Nadav Amit4fc19702019-04-26 16:22:46 -07009#include <linux/sched/task.h>
Pekka Enberg540aca02009-03-04 11:46:40 +020010
Laura Abbottd1163652017-05-08 15:58:11 -070011#include <asm/set_memory.h>
Ingo Molnar66441bd2017-01-27 10:27:10 +010012#include <asm/e820/api.h>
Pekka Enberg4fcb2082009-03-05 14:55:08 +020013#include <asm/init.h>
Pekka Enberge5b2bb52009-03-03 13:15:06 +020014#include <asm/page.h>
Pekka Enberg540aca02009-03-04 11:46:40 +020015#include <asm/page_types.h>
Pekka Enberge5b2bb52009-03-03 13:15:06 +020016#include <asm/sections.h>
Jan Beulich49834392009-05-06 13:06:47 +010017#include <asm/setup.h>
Pekka Enbergf7650902009-03-05 14:55:05 +020018#include <asm/tlbflush.h>
Pekka Enberg9518e0e2009-04-28 16:00:50 +030019#include <asm/tlb.h>
Jaswinder Singh Rajput76c06922009-07-01 19:54:23 +053020#include <asm/proto.h>
Pekka Enberg17623912011-11-01 15:58:22 +020021#include <asm/dma.h> /* for MAX_DMA_PFN */
Fenghua Yucd745be2012-12-20 23:44:31 -080022#include <asm/microcode.h>
Thomas Garnier0483e1f2016-06-21 17:47:02 -070023#include <asm/kaslr.h>
Juergen Grossc138d812017-07-28 12:23:12 +020024#include <asm/hypervisor.h>
Andy Lutomirskic7ad5ad2017-09-10 17:48:27 -070025#include <asm/cpufeature.h>
Thomas Gleixneraa8c6242017-12-04 15:07:36 +010026#include <asm/pti.h>
Nadav Amit4fc19702019-04-26 16:22:46 -070027#include <asm/text-patching.h>
Benjamin Thield5249bc2020-06-06 14:26:29 +020028#include <asm/memtype.h>
Pekka Enberg9518e0e2009-04-28 16:00:50 +030029
Dave Hansend17d8f92014-07-31 08:40:59 -070030/*
31 * We need to define the tracepoints somewhere, and tlb.c
 32 * is only compiled when SMP=y.
33 */
34#define CREATE_TRACE_POINTS
35#include <trace/events/tlb.h>
36
Yinghai Lu5c51bdb2012-11-16 19:39:01 -080037#include "mm_internal.h"
38
Juergen Gross281d4072014-11-03 14:01:47 +010039/*
40 * Tables translating between page_cache_type_t and pte encoding.
Ingo Molnarc709fed2015-03-05 08:58:44 +010041 *
Toshi Kanid5dc8612015-07-22 12:06:11 -060042 * The default values are defined statically as minimal supported mode;
43 * WC and WT fall back to UC-. pat_init() updates these values to support
44 * more cache modes, WC and WT, when it is safe to do so. See pat_init()
 45 * for the details. Note: __early_ioremap(), used during early boot,
46 * takes pgprot_t (pte encoding) and does not use these tables.
Ingo Molnarc709fed2015-03-05 08:58:44 +010047 *
48 * Index into __cachemode2pte_tbl[] is the cachemode.
49 *
50 * Index into __pte2cachemode_tbl[] are the caching attribute bits of the pte
51 * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
Juergen Gross281d4072014-11-03 14:01:47 +010052 */
Christoph Hellwigde17a372020-04-08 17:27:45 +020053static uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
Ingo Molnarc709fed2015-03-05 08:58:44 +010054 [_PAGE_CACHE_MODE_WB ] = 0 | 0 ,
Borislav Petkov9cd25aa2015-06-04 18:55:10 +020055 [_PAGE_CACHE_MODE_WC ] = 0 | _PAGE_PCD,
Ingo Molnarc709fed2015-03-05 08:58:44 +010056 [_PAGE_CACHE_MODE_UC_MINUS] = 0 | _PAGE_PCD,
57 [_PAGE_CACHE_MODE_UC ] = _PAGE_PWT | _PAGE_PCD,
58 [_PAGE_CACHE_MODE_WT ] = 0 | _PAGE_PCD,
59 [_PAGE_CACHE_MODE_WP ] = 0 | _PAGE_PCD,
Juergen Gross281d4072014-11-03 14:01:47 +010060};
Ingo Molnarc709fed2015-03-05 08:58:44 +010061
Christoph Hellwigde17a372020-04-08 17:27:45 +020062unsigned long cachemode2protval(enum page_cache_mode pcm)
63{
64 if (likely(pcm == 0))
65 return 0;
66 return __cachemode2pte_tbl[pcm];
67}
68EXPORT_SYMBOL(cachemode2protval);
Juergen Gross31bb7722015-01-22 12:43:17 +010069
Christoph Hellwig7fa3e102020-04-08 17:27:43 +020070static uint8_t __pte2cachemode_tbl[8] = {
Ingo Molnarc709fed2015-03-05 08:58:44 +010071 [__pte2cm_idx( 0 | 0 | 0 )] = _PAGE_CACHE_MODE_WB,
Borislav Petkov9cd25aa2015-06-04 18:55:10 +020072 [__pte2cm_idx(_PAGE_PWT | 0 | 0 )] = _PAGE_CACHE_MODE_UC_MINUS,
Ingo Molnarc709fed2015-03-05 08:58:44 +010073 [__pte2cm_idx( 0 | _PAGE_PCD | 0 )] = _PAGE_CACHE_MODE_UC_MINUS,
74 [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0 )] = _PAGE_CACHE_MODE_UC,
75 [__pte2cm_idx( 0 | 0 | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
Borislav Petkov9cd25aa2015-06-04 18:55:10 +020076 [__pte2cm_idx(_PAGE_PWT | 0 | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
Ingo Molnarc709fed2015-03-05 08:58:44 +010077 [__pte2cm_idx(0 | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
Juergen Gross281d4072014-11-03 14:01:47 +010078 [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
79};
Juergen Gross281d4072014-11-03 14:01:47 +010080
Christoph Hellwig1f6f6552020-04-08 17:27:42 +020081/* Check that the write-protect PAT entry is set for write-protect */
82bool x86_has_pat_wp(void)
83{
84 return __pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] == _PAGE_CACHE_MODE_WP;
85}
86
Christoph Hellwig7fa3e102020-04-08 17:27:43 +020087enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
88{
89 unsigned long masked;
90
91 masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
92 if (likely(masked == 0))
93 return 0;
94 return __pte2cachemode_tbl[__pte2cm_idx(masked)];
95}
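/*
 * Illustrative example (not from the original source), derived from the
 * static default tables above, i.e. before pat_init() rewrites them:
 *
 *	cachemode2protval(_PAGE_CACHE_MODE_UC) == (_PAGE_PWT | _PAGE_PCD);
 *	pgprot2cachemode(__pgprot(_PAGE_PWT | _PAGE_PCD)) == _PAGE_CACHE_MODE_UC;
 *
 * while WC, WT and WP all translate to the UC- encoding (_PAGE_PCD only),
 * and the reverse lookup of _PAGE_PCD yields _PAGE_CACHE_MODE_UC_MINUS.
 */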
Yinghai Lu5c51bdb2012-11-16 19:39:01 -080096
Yinghai Lu5c51bdb2012-11-16 19:39:01 -080097static unsigned long __initdata pgt_buf_start;
Yinghai Luc9b32342013-01-24 12:19:42 -080098static unsigned long __initdata pgt_buf_end;
Yinghai Lu5c51bdb2012-11-16 19:39:01 -080099static unsigned long __initdata pgt_buf_top;
100
Zhi Yong Wud4dd1002013-11-12 15:08:28 -0800101static unsigned long min_pfn_mapped;
Yinghai Lu5c51bdb2012-11-16 19:39:01 -0800102
103static bool __initdata can_use_brk_pgt = true;
Yinghai Lu22c8ca22012-11-16 19:39:04 -0800104
Yinghai Lu5c51bdb2012-11-16 19:39:01 -0800105/*
Zhi Yong Wud4dd1002013-11-12 15:08:28 -0800106 * Pages returned are already directly mapped.
Yinghai Lu22c8ca22012-11-16 19:39:04 -0800107 *
Yinghai Lu5c51bdb2012-11-16 19:39:01 -0800108 * Changing that is likely to break Xen, see commit:
Yinghai Lu22c8ca22012-11-16 19:39:04 -0800109 *
110 * 279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
111 *
Yinghai Luc9b32342013-01-24 12:19:42 -0800112 * for detailed information.
113 */
Yinghai Lu22c8ca22012-11-16 19:39:04 -0800114__ref void *alloc_low_pages(unsigned int num)
Yinghai Lu5c51bdb2012-11-16 19:39:01 -0800115{
Yinghai Lu22c8ca22012-11-16 19:39:04 -0800116 unsigned long pfn;
117 int i;
118
119 if (after_bootmem) {
120 unsigned int order;
121
122 order = get_order((unsigned long)num << PAGE_SHIFT);
Levin, Alexander (Sasha Levin)75f296d2017-11-15 17:35:54 -0800123 return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
Yinghai Lu5c51bdb2012-11-16 19:39:01 -0800124 }
125
126 if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
Juergen Gross75f2d3a2018-08-20 17:24:20 +0200127 unsigned long ret = 0;
128
129 if (min_pfn_mapped < max_pfn_mapped) {
130 ret = memblock_find_in_range(
131 min_pfn_mapped << PAGE_SHIFT,
Yinghai Lu5c51bdb2012-11-16 19:39:01 -0800132 max_pfn_mapped << PAGE_SHIFT,
133 PAGE_SIZE * num , PAGE_SIZE);
Juergen Gross75f2d3a2018-08-20 17:24:20 +0200134 }
135 if (ret)
136 memblock_reserve(ret, PAGE_SIZE * num);
137 else if (can_use_brk_pgt)
138 ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));
139
Yinghai Lu5c51bdb2012-11-16 19:39:01 -0800140 if (!ret)
141 panic("alloc_low_pages: can not alloc memory");
Juergen Gross75f2d3a2018-08-20 17:24:20 +0200142
Yinghai Lu5c51bdb2012-11-16 19:39:01 -0800143 pfn = ret >> PAGE_SHIFT;
144 } else {
145 pfn = pgt_buf_end;
146 pgt_buf_end += num;
Yinghai Lu5c51bdb2012-11-16 19:39:01 -0800147 }
148
Yinghai Lu22c8ca22012-11-16 19:39:04 -0800149 for (i = 0; i < num; i++) {
150 void *adr;
151
152 adr = __va((pfn + i) << PAGE_SHIFT);
153 clear_page(adr);
154 }
155
156 return __va(pfn << PAGE_SHIFT);
Yinghai Lu5c51bdb2012-11-16 19:39:01 -0800157}
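/*
 * Usage sketch (illustrative, assuming the alloc_low_page() wrapper in
 * mm_internal.h simply returns alloc_low_pages(1)): early page-table setup
 * code typically does
 *
 *	pte_t *pte = alloc_low_page();
 *
 * During early boot this hands out zeroed, already-mapped pages from the
 * brk/memblock pgt_buf area; once after_bootmem is set it falls back to
 * __get_free_pages().
 */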
158
Thomas Garnierfb754f92016-08-09 10:11:05 -0700159/*
Lorenzo Stoakes167dcfc2020-12-15 20:56:41 +0000160 * By default we need to be able to allocate page tables below the PGD, first
 161 * for the 0-ISA_END_ADDRESS range and then for the initial PMD_SIZE mapping.
162 * With KASLR memory randomization, depending on the machine e820 memory and the
163 * PUD alignment, twice that many pages may be needed when KASLR memory
Thomas Garnierfb754f92016-08-09 10:11:05 -0700164 * randomization is enabled.
165 */
Lorenzo Stoakes167dcfc2020-12-15 20:56:41 +0000166
167#ifndef CONFIG_X86_5LEVEL
168#define INIT_PGD_PAGE_TABLES 3
Thomas Garnierfb754f92016-08-09 10:11:05 -0700169#else
Lorenzo Stoakes167dcfc2020-12-15 20:56:41 +0000170#define INIT_PGD_PAGE_TABLES 4
Thomas Garnierfb754f92016-08-09 10:11:05 -0700171#endif
Lorenzo Stoakes167dcfc2020-12-15 20:56:41 +0000172
173#ifndef CONFIG_RANDOMIZE_MEMORY
174#define INIT_PGD_PAGE_COUNT (2 * INIT_PGD_PAGE_TABLES)
175#else
176#define INIT_PGD_PAGE_COUNT (4 * INIT_PGD_PAGE_TABLES)
177#endif
178
Thomas Garnierfb754f92016-08-09 10:11:05 -0700179#define INIT_PGT_BUF_SIZE (INIT_PGD_PAGE_COUNT * PAGE_SIZE)
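/*
 * Worked example of the sizing above (illustrative): a 4-level
 * (!CONFIG_X86_5LEVEL) kernel without CONFIG_RANDOMIZE_MEMORY reserves
 * 2 * 3 = 6 pages, i.e. a 24 KiB brk area, while a 5-level kernel with
 * KASLR memory randomization reserves 4 * 4 = 16 pages (64 KiB).
 */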
Yinghai Lu8d574702012-11-16 19:38:58 -0800180RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
181void __init early_alloc_pgt_buf(void)
182{
183 unsigned long tables = INIT_PGT_BUF_SIZE;
184 phys_addr_t base;
185
186 base = __pa(extend_brk(tables, PAGE_SIZE));
187
188 pgt_buf_start = base >> PAGE_SHIFT;
189 pgt_buf_end = pgt_buf_start;
190 pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
191}
192
Pekka Enbergf7650902009-03-05 14:55:05 +0200193int after_bootmem;
194
Ingo Molnar10971ab2015-03-05 08:18:23 +0100195early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);
Yinghai Lu148b2092012-11-16 19:39:08 -0800196
Jacob Shin844ab6f2012-10-24 14:24:44 -0500197struct map_range {
198 unsigned long start;
199 unsigned long end;
200 unsigned page_size_mask;
201};
202
Yinghai Lufa62aaf2012-11-16 19:38:38 -0800203static int page_size_mask;
Pekka Enbergf7650902009-03-05 14:55:05 +0200204
Thomas Gleixner96f59fe2020-04-21 11:20:39 +0200205/*
206 * Save some of cr4 feature set we're using (e.g. Pentium 4MB
207 * enable and PPro Global page enable), so that any CPU's that boot
208 * up after us can get the correct flags. Invoked on the boot CPU.
209 */
210static inline void cr4_set_bits_and_update_boot(unsigned long mask)
211{
212 mmu_cr4_features |= mask;
213 if (trampoline_cr4_features)
214 *trampoline_cr4_features = mmu_cr4_features;
215 cr4_set_bits(mask);
216}
217
Yinghai Lu22ddfca2012-11-16 19:38:41 -0800218static void __init probe_page_size_mask(void)
Yinghai Lufa62aaf2012-11-16 19:38:38 -0800219{
Yinghai Lufa62aaf2012-11-16 19:38:38 -0800220 /*
Levin, Alexander (Sasha Levin)4675ff02017-11-15 17:36:02 -0800221 * For pagealloc debugging, identity mapping will use small pages.
Yinghai Lufa62aaf2012-11-16 19:38:38 -0800222 * This will simplify cpa(), which otherwise needs to support splitting
223 * large pages into small in interrupt context, etc.
224 */
Levin, Alexander (Sasha Levin)4675ff02017-11-15 17:36:02 -0800225 if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
Yinghai Lufa62aaf2012-11-16 19:38:38 -0800226 page_size_mask |= 1 << PG_LEVEL_2M;
Vlastimil Babkad9ee35a2017-06-12 09:21:30 +0200227 else
228 direct_gbpages = 0;
Yinghai Lufa62aaf2012-11-16 19:38:38 -0800229
230 /* Enable PSE if available */
Borislav Petkov16bf9222016-03-29 17:42:03 +0200231 if (boot_cpu_has(X86_FEATURE_PSE))
Andy Lutomirski375074c2014-10-24 15:58:07 -0700232 cr4_set_bits_and_update_boot(X86_CR4_PSE);
Yinghai Lufa62aaf2012-11-16 19:38:38 -0800233
234 /* Enable PGE if available */
Dave Hansenc313ec62017-12-04 15:07:34 +0100235 __supported_pte_mask &= ~_PAGE_GLOBAL;
Borislav Petkovc109bf92016-03-29 17:42:02 +0200236 if (boot_cpu_has(X86_FEATURE_PGE)) {
Andy Lutomirski375074c2014-10-24 15:58:07 -0700237 cr4_set_bits_and_update_boot(X86_CR4_PGE);
Dave Hansen39114b72018-04-06 13:55:17 -0700238 __supported_pte_mask |= _PAGE_GLOBAL;
Dave Hansenc313ec62017-12-04 15:07:34 +0100239 }
Ingo Molnare61980a2015-03-05 08:25:01 +0100240
Dave Hansen8a57f482018-04-06 13:55:06 -0700241 /* By default, everything is supported: */
242 __default_kernel_pte_mask = __supported_pte_mask;
243 /* Except when with PTI where the kernel is mostly non-Global: */
244 if (cpu_feature_enabled(X86_FEATURE_PTI))
245 __default_kernel_pte_mask &= ~_PAGE_GLOBAL;
246
Ingo Molnare61980a2015-03-05 08:25:01 +0100247 /* Enable 1 GB linear kernel mappings if available: */
Borislav Petkovb8291adc2016-03-29 17:41:58 +0200248 if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
Ingo Molnare61980a2015-03-05 08:25:01 +0100249 printk(KERN_INFO "Using GB pages for direct mapping\n");
250 page_size_mask |= 1 << PG_LEVEL_1G;
251 } else {
252 direct_gbpages = 0;
253 }
Yinghai Lufa62aaf2012-11-16 19:38:38 -0800254}
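/*
 * Example outcome (illustrative, hardware dependent): on a typical x86-64
 * CPU with PSE, PGE and GBPAGES, with neither debug_pagealloc nor a
 * "nogbpages" override, probe_page_size_mask() leaves page_size_mask with
 * both (1 << PG_LEVEL_2M) and (1 << PG_LEVEL_1G) set, so the direct map
 * built below prefers 2M and 1G pages.
 */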
Stefano Stabellini279b7062011-04-14 15:49:41 +0100255
Andy Lutomirskic7ad5ad2017-09-10 17:48:27 -0700256static void setup_pcid(void)
257{
Dave Hansen6cff64b2017-12-04 15:08:01 +0100258 if (!IS_ENABLED(CONFIG_X86_64))
259 return;
260
261 if (!boot_cpu_has(X86_FEATURE_PCID))
262 return;
263
264 if (boot_cpu_has(X86_FEATURE_PGE)) {
265 /*
266 * This can't be cr4_set_bits_and_update_boot() -- the
267 * trampoline code can't handle CR4.PCIDE and it wouldn't
268 * do any good anyway. Despite the name,
269 * cr4_set_bits_and_update_boot() doesn't actually cause
270 * the bits in question to remain set all the way through
271 * the secondary boot asm.
272 *
273 * Instead, we brute-force it and set CR4.PCIDE manually in
274 * start_secondary().
275 */
276 cr4_set_bits(X86_CR4_PCIDE);
277
278 /*
279 * INVPCID's single-context modes (2/3) only work if we set
 280 * X86_CR4_PCIDE, *and* we have INVPCID support. It's unusable
281 * on systems that have X86_CR4_PCIDE clear, or that have
282 * no INVPCID support at all.
283 */
284 if (boot_cpu_has(X86_FEATURE_INVPCID))
285 setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
286 } else {
287 /*
288 * flush_tlb_all(), as currently implemented, won't work if
289 * PCID is on but PGE is not. Since that combination
290 * doesn't exist on real hardware, there's no reason to try
291 * to fully support it, but it's polite to avoid corrupting
292 * data if we're on an improperly configured VM.
293 */
294 setup_clear_cpu_cap(X86_FEATURE_PCID);
Andy Lutomirskic7ad5ad2017-09-10 17:48:27 -0700295 }
Andy Lutomirskic7ad5ad2017-09-10 17:48:27 -0700296}
297
Pekka Enbergf7650902009-03-05 14:55:05 +0200298#ifdef CONFIG_X86_32
299#define NR_RANGE_MR 3
300#else /* CONFIG_X86_64 */
301#define NR_RANGE_MR 5
302#endif
303
Jan Beulichdc9dd5c2009-03-12 12:40:06 +0000304static int __meminit save_mr(struct map_range *mr, int nr_range,
305 unsigned long start_pfn, unsigned long end_pfn,
306 unsigned long page_size_mask)
Pekka Enbergf7650902009-03-05 14:55:05 +0200307{
308 if (start_pfn < end_pfn) {
309 if (nr_range >= NR_RANGE_MR)
310 panic("run out of range for init_memory_mapping\n");
311 mr[nr_range].start = start_pfn<<PAGE_SHIFT;
312 mr[nr_range].end = end_pfn<<PAGE_SHIFT;
313 mr[nr_range].page_size_mask = page_size_mask;
314 nr_range++;
315 }
316
317 return nr_range;
318}
319
Yinghai Luaeebe842012-11-16 19:38:55 -0800320/*
 321 * Adjust the page_size_mask for a small range to use a big page size
 322 * instead of a small one if the nearby memory is RAM too.
323 */
Fabian Frederickbd721ea2016-08-02 14:03:33 -0700324static void __ref adjust_range_page_size_mask(struct map_range *mr,
Yinghai Luaeebe842012-11-16 19:38:55 -0800325 int nr_range)
326{
327 int i;
328
329 for (i = 0; i < nr_range; i++) {
330 if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
331 !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
332 unsigned long start = round_down(mr[i].start, PMD_SIZE);
333 unsigned long end = round_up(mr[i].end, PMD_SIZE);
334
335#ifdef CONFIG_X86_32
336 if ((end >> PAGE_SHIFT) > max_low_pfn)
337 continue;
338#endif
339
340 if (memblock_is_region_memory(start, end - start))
341 mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
342 }
343 if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
344 !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
345 unsigned long start = round_down(mr[i].start, PUD_SIZE);
346 unsigned long end = round_up(mr[i].end, PUD_SIZE);
347
348 if (memblock_is_region_memory(start, end - start))
349 mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
350 }
351 }
352}
353
Dave Hansenf15e0512015-02-10 13:20:30 -0800354static const char *page_size_string(struct map_range *mr)
355{
356 static const char str_1g[] = "1G";
357 static const char str_2m[] = "2M";
358 static const char str_4m[] = "4M";
359 static const char str_4k[] = "4k";
360
361 if (mr->page_size_mask & (1<<PG_LEVEL_1G))
362 return str_1g;
363 /*
364 * 32-bit without PAE has a 4M large page size.
365 * PG_LEVEL_2M is misnamed, but we can at least
366 * print out the right size in the string.
367 */
368 if (IS_ENABLED(CONFIG_X86_32) &&
369 !IS_ENABLED(CONFIG_X86_PAE) &&
370 mr->page_size_mask & (1<<PG_LEVEL_2M))
371 return str_4m;
372
373 if (mr->page_size_mask & (1<<PG_LEVEL_2M))
374 return str_2m;
375
376 return str_4k;
377}
378
Yinghai Lu4e33e062012-11-16 19:38:39 -0800379static int __meminit split_mem_range(struct map_range *mr, int nr_range,
380 unsigned long start,
381 unsigned long end)
Pekka Enbergf7650902009-03-05 14:55:05 +0200382{
Yinghai Lu2e8059e2012-11-16 19:39:15 -0800383 unsigned long start_pfn, end_pfn, limit_pfn;
Yinghai Lu1829ae92012-11-16 19:39:14 -0800384 unsigned long pfn;
Yinghai Lu4e33e062012-11-16 19:38:39 -0800385 int i;
Pekka Enbergf7650902009-03-05 14:55:05 +0200386
Yinghai Lu2e8059e2012-11-16 19:39:15 -0800387 limit_pfn = PFN_DOWN(end);
388
Pekka Enbergf7650902009-03-05 14:55:05 +0200389 /* head if not big page aligned? */
Yinghai Lu1829ae92012-11-16 19:39:14 -0800390 pfn = start_pfn = PFN_DOWN(start);
Pekka Enbergf7650902009-03-05 14:55:05 +0200391#ifdef CONFIG_X86_32
392 /*
393 * Don't use a large page for the first 2/4MB of memory
394 * because there are often fixed size MTRRs in there
395 * and overlapping MTRRs into large pages can cause
396 * slowdowns.
397 */
Yinghai Lu1829ae92012-11-16 19:39:14 -0800398 if (pfn == 0)
Yinghai Lu84d77002012-11-16 19:39:13 -0800399 end_pfn = PFN_DOWN(PMD_SIZE);
Pekka Enbergf7650902009-03-05 14:55:05 +0200400 else
Yinghai Lu1829ae92012-11-16 19:39:14 -0800401 end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
Pekka Enbergf7650902009-03-05 14:55:05 +0200402#else /* CONFIG_X86_64 */
Yinghai Lu1829ae92012-11-16 19:39:14 -0800403 end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
Pekka Enbergf7650902009-03-05 14:55:05 +0200404#endif
Yinghai Lu2e8059e2012-11-16 19:39:15 -0800405 if (end_pfn > limit_pfn)
406 end_pfn = limit_pfn;
Pekka Enbergf7650902009-03-05 14:55:05 +0200407 if (start_pfn < end_pfn) {
408 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
Yinghai Lu1829ae92012-11-16 19:39:14 -0800409 pfn = end_pfn;
Pekka Enbergf7650902009-03-05 14:55:05 +0200410 }
411
412 /* big page (2M) range */
Yinghai Lu1829ae92012-11-16 19:39:14 -0800413 start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
Pekka Enbergf7650902009-03-05 14:55:05 +0200414#ifdef CONFIG_X86_32
Yinghai Lu2e8059e2012-11-16 19:39:15 -0800415 end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
Pekka Enbergf7650902009-03-05 14:55:05 +0200416#else /* CONFIG_X86_64 */
Yinghai Lu1829ae92012-11-16 19:39:14 -0800417 end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
Yinghai Lu2e8059e2012-11-16 19:39:15 -0800418 if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
419 end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
Pekka Enbergf7650902009-03-05 14:55:05 +0200420#endif
421
422 if (start_pfn < end_pfn) {
423 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
424 page_size_mask & (1<<PG_LEVEL_2M));
Yinghai Lu1829ae92012-11-16 19:39:14 -0800425 pfn = end_pfn;
Pekka Enbergf7650902009-03-05 14:55:05 +0200426 }
427
428#ifdef CONFIG_X86_64
429 /* big page (1G) range */
Yinghai Lu1829ae92012-11-16 19:39:14 -0800430 start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
Yinghai Lu2e8059e2012-11-16 19:39:15 -0800431 end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
Pekka Enbergf7650902009-03-05 14:55:05 +0200432 if (start_pfn < end_pfn) {
433 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
434 page_size_mask &
435 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
Yinghai Lu1829ae92012-11-16 19:39:14 -0800436 pfn = end_pfn;
Pekka Enbergf7650902009-03-05 14:55:05 +0200437 }
438
 439 /* tail that is not big page (1G) aligned */
Yinghai Lu1829ae92012-11-16 19:39:14 -0800440 start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
Yinghai Lu2e8059e2012-11-16 19:39:15 -0800441 end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
Pekka Enbergf7650902009-03-05 14:55:05 +0200442 if (start_pfn < end_pfn) {
443 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
444 page_size_mask & (1<<PG_LEVEL_2M));
Yinghai Lu1829ae92012-11-16 19:39:14 -0800445 pfn = end_pfn;
Pekka Enbergf7650902009-03-05 14:55:05 +0200446 }
447#endif
448
 449 /* tail that is not big page (2M) aligned */
Yinghai Lu1829ae92012-11-16 19:39:14 -0800450 start_pfn = pfn;
Yinghai Lu2e8059e2012-11-16 19:39:15 -0800451 end_pfn = limit_pfn;
Pekka Enbergf7650902009-03-05 14:55:05 +0200452 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
453
Yinghai Lu7de3d662013-05-31 08:53:07 -0700454 if (!after_bootmem)
455 adjust_range_page_size_mask(mr, nr_range);
456
Pekka Enbergf7650902009-03-05 14:55:05 +0200457 /* try to merge contiguous ranges with the same page size */
458 for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
459 unsigned long old_start;
460 if (mr[i].end != mr[i+1].start ||
461 mr[i].page_size_mask != mr[i+1].page_size_mask)
462 continue;
463 /* move it */
464 old_start = mr[i].start;
465 memmove(&mr[i], &mr[i+1],
466 (nr_range - 1 - i) * sizeof(struct map_range));
467 mr[i--].start = old_start;
468 nr_range--;
469 }
470
471 for (i = 0; i < nr_range; i++)
Dan Williamsc9cdaeb2015-09-17 16:27:57 -0400472 pr_debug(" [mem %#010lx-%#010lx] page %s\n",
Bjorn Helgaas365811d2012-05-29 15:06:29 -0700473 mr[i].start, mr[i].end - 1,
Dave Hansenf15e0512015-02-10 13:20:30 -0800474 page_size_string(&mr[i]));
Pekka Enbergf7650902009-03-05 14:55:05 +0200475
Yinghai Lu4e33e062012-11-16 19:38:39 -0800476 return nr_range;
477}
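/*
 * Illustrative example (addresses made up): splitting [1 MiB, 1 GiB + 2 MiB)
 * on 64-bit with 2M pages enabled initially yields
 *
 *	[1 MiB,   2 MiB)          4k pages  (head, not 2M aligned)
 *	[2 MiB,   1 GiB)          2M pages
 *	[1 GiB,   1 GiB + 2 MiB)  2M pages  (tail below the next 1G boundary)
 *
 * before adjust_range_page_size_mask() possibly promotes ranges whose
 * surroundings are all RAM, and adjacent ranges with equal masks are merged.
 */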
478
Ingo Molnar08b46d52017-01-28 17:29:08 +0100479struct range pfn_mapped[E820_MAX_ENTRIES];
Yinghai Lu0e691cf2013-01-24 12:20:05 -0800480int nr_pfn_mapped;
Jacob Shin66520eb2012-11-16 19:38:52 -0800481
482static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
483{
Ingo Molnar08b46d52017-01-28 17:29:08 +0100484 nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_MAX_ENTRIES,
Jacob Shin66520eb2012-11-16 19:38:52 -0800485 nr_pfn_mapped, start_pfn, end_pfn);
Ingo Molnar08b46d52017-01-28 17:29:08 +0100486 nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_MAX_ENTRIES);
Jacob Shin66520eb2012-11-16 19:38:52 -0800487
488 max_pfn_mapped = max(max_pfn_mapped, end_pfn);
489
490 if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
491 max_low_pfn_mapped = max(max_low_pfn_mapped,
492 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
493}
494
495bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
496{
497 int i;
498
499 for (i = 0; i < nr_pfn_mapped; i++)
500 if ((start_pfn >= pfn_mapped[i].start) &&
501 (end_pfn <= pfn_mapped[i].end))
502 return true;
503
504 return false;
505}
506
Yinghai Lu2086fe12012-11-16 19:38:40 -0800507/*
Yinghai Lu4e33e062012-11-16 19:38:39 -0800508 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
509 * This runs before bootmem is initialized and gets pages directly from
510 * the physical memory. To access them they are temporarily mapped.
511 */
Fabian Frederickbd721ea2016-08-02 14:03:33 -0700512unsigned long __ref init_memory_mapping(unsigned long start,
Logan Gunthorpec164fbb2020-04-10 14:33:24 -0700513 unsigned long end, pgprot_t prot)
Yinghai Lu4e33e062012-11-16 19:38:39 -0800514{
515 struct map_range mr[NR_RANGE_MR];
516 unsigned long ret = 0;
517 int nr_range, i;
518
Dan Williamsc9cdaeb2015-09-17 16:27:57 -0400519 pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
Yinghai Lu4e33e062012-11-16 19:38:39 -0800520 start, end - 1);
521
522 memset(mr, 0, sizeof(mr));
523 nr_range = split_mem_range(mr, 0, start, end);
524
Pekka Enbergf7650902009-03-05 14:55:05 +0200525 for (i = 0; i < nr_range; i++)
526 ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
Logan Gunthorpec164fbb2020-04-10 14:33:24 -0700527 mr[i].page_size_mask,
528 prot);
Pekka Enbergf7650902009-03-05 14:55:05 +0200529
Jacob Shin66520eb2012-11-16 19:38:52 -0800530 add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);
531
Yinghai Luc14fa0b2012-11-16 19:38:44 -0800532 return ret >> PAGE_SHIFT;
533}
534
Jacob Shin66520eb2012-11-16 19:38:52 -0800535/*
Zhang Yanfeicf8b1662013-05-09 23:57:42 +0800536 * We need to iterate through the E820 memory map and create direct mappings
Ingo Molnar09821ff2017-01-28 17:09:33 +0100537 * for only E820_TYPE_RAM and E820_KERN_RESERVED regions. We cannot simply
Zhang Yanfeicf8b1662013-05-09 23:57:42 +0800538 * create direct mappings for all pfns from [0 to max_low_pfn) and
539 * [4GB to max_pfn) because of possible memory holes in high addresses
540 * that cannot be marked as UC by fixed/variable range MTRRs.
541 * Depending on the alignment of E820 ranges, this may possibly result
542 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
543 *
 544 * init_mem_mapping() calls init_range_memory_mapping() with a big range.
 545 * That range may have holes in the middle or at the ends, and only the RAM
 546 * parts will be mapped in init_range_memory_mapping().
Jacob Shin66520eb2012-11-16 19:38:52 -0800547 */
Yinghai Lu8d574702012-11-16 19:38:58 -0800548static unsigned long __init init_range_memory_mapping(
Yinghai Lub8fd39c2012-11-16 19:39:18 -0800549 unsigned long r_start,
550 unsigned long r_end)
Jacob Shin66520eb2012-11-16 19:38:52 -0800551{
552 unsigned long start_pfn, end_pfn;
Yinghai Lu8d574702012-11-16 19:38:58 -0800553 unsigned long mapped_ram_size = 0;
Jacob Shin66520eb2012-11-16 19:38:52 -0800554 int i;
555
Jacob Shin66520eb2012-11-16 19:38:52 -0800556 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
Yinghai Lub8fd39c2012-11-16 19:39:18 -0800557 u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
558 u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
559 if (start >= end)
Jacob Shin66520eb2012-11-16 19:38:52 -0800560 continue;
561
Yinghai Luc9b32342013-01-24 12:19:42 -0800562 /*
563 * if it is overlapping with brk pgt, we need to
564 * alloc pgt buf from memblock instead.
565 */
566 can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
567 min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
Logan Gunthorpec164fbb2020-04-10 14:33:24 -0700568 init_memory_mapping(start, end, PAGE_KERNEL);
Yinghai Lu8d574702012-11-16 19:38:58 -0800569 mapped_ram_size += end - start;
Yinghai Luc9b32342013-01-24 12:19:42 -0800570 can_use_brk_pgt = true;
Jacob Shin66520eb2012-11-16 19:38:52 -0800571 }
Yinghai Lu8d574702012-11-16 19:38:58 -0800572
573 return mapped_ram_size;
Jacob Shin66520eb2012-11-16 19:38:52 -0800574}
575
Yinghai Lu69792872013-09-06 19:07:09 -0700576static unsigned long __init get_new_step_size(unsigned long step_size)
577{
578 /*
Jan Beulich132978b2014-12-19 16:10:54 +0000579 * Initial mapped size is PMD_SIZE (2M).
Yinghai Lu69792872013-09-06 19:07:09 -0700580 * We can not set step_size to be PUD_SIZE (1G) yet.
 581 * In the worst case, when we cross the 1G boundary and
 582 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
Jan Beulich132978b2014-12-19 16:10:54 +0000583 * to map a 1G range with PTEs. Hence we use one less than the
584 * difference of page table level shifts.
Yinghai Lu69792872013-09-06 19:07:09 -0700585 *
Jan Beulich132978b2014-12-19 16:10:54 +0000586 * Don't need to worry about overflow in the top-down case, on 32bit,
587 * when step_size is 0, round_down() returns 0 for start, and that
588 * turns it into 0x100000000ULL.
589 * In the bottom-up case, round_up(x, 0) returns 0 though too, which
590 * needs to be taken into consideration by the code below.
Yinghai Lu69792872013-09-06 19:07:09 -0700591 */
Jan Beulich132978b2014-12-19 16:10:54 +0000592 return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
Yinghai Lu69792872013-09-06 19:07:09 -0700593}
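/*
 * Worked example of the shift above: with 4k pages, PMD_SHIFT - PAGE_SHIFT - 1
 * equals 21 - 12 - 1 = 8, so the mapping window grows 2 MiB -> 512 MiB ->
 * 128 GiB -> ... as successive chunks of the direct map come online.
 */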
594
Tang Chen0167d7d2013-11-12 15:08:02 -0800595/**
596 * memory_map_top_down - Map [map_start, map_end) top down
597 * @map_start: start address of the target memory range
598 * @map_end: end address of the target memory range
599 *
600 * This function will setup direct mapping for memory range
601 * [map_start, map_end) in top-down. That said, the page tables
602 * will be allocated at the end of the memory, and we map the
603 * memory in top-down.
604 */
605static void __init memory_map_top_down(unsigned long map_start,
606 unsigned long map_end)
Yinghai Luc14fa0b2012-11-16 19:38:44 -0800607{
Lukas Bulwahnbab202a2020-09-28 12:00:04 +0200608 unsigned long real_end, last_start;
Yinghai Lu8d574702012-11-16 19:38:58 -0800609 unsigned long step_size;
610 unsigned long addr;
611 unsigned long mapped_ram_size = 0;
Yinghai Luab951932012-11-16 19:38:45 -0800612
Yinghai Lu98e7a982013-03-06 20:18:21 -0800613 /* Xen has a big reserved range near the end of RAM, skip it at first. */
Tang Chen0167d7d2013-11-12 15:08:02 -0800614 addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
Yinghai Lu8d574702012-11-16 19:38:58 -0800615 real_end = addr + PMD_SIZE;
616
 617 /* step_size needs to be small so the pgt_buf from BRK can cover it */
618 step_size = PMD_SIZE;
619 max_pfn_mapped = 0; /* will get exact value next */
620 min_pfn_mapped = real_end >> PAGE_SHIFT;
Lukas Bulwahnbab202a2020-09-28 12:00:04 +0200621 last_start = real_end;
Zhang Yanfeicf8b1662013-05-09 23:57:42 +0800622
623 /*
624 * We start from the top (end of memory) and go to the bottom.
625 * The memblock_find_in_range() gets us a block of RAM from the
626 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
627 * for page table.
628 */
Tang Chen0167d7d2013-11-12 15:08:02 -0800629 while (last_start > map_start) {
Lukas Bulwahnbab202a2020-09-28 12:00:04 +0200630 unsigned long start;
631
Yinghai Lu8d574702012-11-16 19:38:58 -0800632 if (last_start > step_size) {
633 start = round_down(last_start - 1, step_size);
Tang Chen0167d7d2013-11-12 15:08:02 -0800634 if (start < map_start)
635 start = map_start;
Yinghai Lu8d574702012-11-16 19:38:58 -0800636 } else
Tang Chen0167d7d2013-11-12 15:08:02 -0800637 start = map_start;
Jan Beulich132978b2014-12-19 16:10:54 +0000638 mapped_ram_size += init_range_memory_mapping(start,
Yinghai Lu8d574702012-11-16 19:38:58 -0800639 last_start);
640 last_start = start;
641 min_pfn_mapped = last_start >> PAGE_SHIFT;
Jan Beulich132978b2014-12-19 16:10:54 +0000642 if (mapped_ram_size >= step_size)
Yinghai Lu69792872013-09-06 19:07:09 -0700643 step_size = get_new_step_size(step_size);
Yinghai Lu8d574702012-11-16 19:38:58 -0800644 }
645
Tang Chen0167d7d2013-11-12 15:08:02 -0800646 if (real_end < map_end)
647 init_range_memory_mapping(real_end, map_end);
648}
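/*
 * Illustrative walk-through (not from the original source): with, say,
 * 16 GiB of RAM the loop first maps the highest PMD_SIZE (2 MiB) chunk using
 * page tables from the brk pgt_buf, then steps downwards in ever larger
 * chunks (see get_new_step_size()), each time allocating new page tables
 * from memory mapped in the previous iteration, until map_start is reached.
 */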
649
Tang Chenb959ed6c2013-11-12 15:08:05 -0800650/**
651 * memory_map_bottom_up - Map [map_start, map_end) bottom up
652 * @map_start: start address of the target memory range
653 * @map_end: end address of the target memory range
654 *
655 * This function will setup direct mapping for memory range
 656 * [map_start, map_end) in bottom-up fashion. Since we have limited the
 657 * bottom-up allocation above the kernel, the page tables will
 658 * be allocated just above the kernel and we map the memory
 659 * in [map_start, map_end) bottom-up.
660 */
661static void __init memory_map_bottom_up(unsigned long map_start,
662 unsigned long map_end)
663{
Jan Beulich132978b2014-12-19 16:10:54 +0000664 unsigned long next, start;
Tang Chenb959ed6c2013-11-12 15:08:05 -0800665 unsigned long mapped_ram_size = 0;
 666 /* step_size needs to be small so the pgt_buf from BRK can cover it */
667 unsigned long step_size = PMD_SIZE;
668
669 start = map_start;
670 min_pfn_mapped = start >> PAGE_SHIFT;
671
672 /*
673 * We start from the bottom (@map_start) and go to the top (@map_end).
674 * The memblock_find_in_range() gets us a block of RAM from the
675 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
676 * for page table.
677 */
678 while (start < map_end) {
Jan Beulich132978b2014-12-19 16:10:54 +0000679 if (step_size && map_end - start > step_size) {
Tang Chenb959ed6c2013-11-12 15:08:05 -0800680 next = round_up(start + 1, step_size);
681 if (next > map_end)
682 next = map_end;
Jan Beulich132978b2014-12-19 16:10:54 +0000683 } else {
Tang Chenb959ed6c2013-11-12 15:08:05 -0800684 next = map_end;
Jan Beulich132978b2014-12-19 16:10:54 +0000685 }
Tang Chenb959ed6c2013-11-12 15:08:05 -0800686
Jan Beulich132978b2014-12-19 16:10:54 +0000687 mapped_ram_size += init_range_memory_mapping(start, next);
Tang Chenb959ed6c2013-11-12 15:08:05 -0800688 start = next;
689
Jan Beulich132978b2014-12-19 16:10:54 +0000690 if (mapped_ram_size >= step_size)
Tang Chenb959ed6c2013-11-12 15:08:05 -0800691 step_size = get_new_step_size(step_size);
Tang Chenb959ed6c2013-11-12 15:08:05 -0800692 }
693}
694
Mike Rapoport88107d32020-06-08 21:33:01 -0700695/*
 696 * The real mode trampoline, which is required for bootstrapping CPUs,
697 * occupies only a small area under the low 1MB. See reserve_real_mode()
698 * for details.
699 *
700 * If KASLR is disabled the first PGD entry of the direct mapping is copied
701 * to map the real mode trampoline.
702 *
703 * If KASLR is enabled, copy only the PUD which covers the low 1MB
704 * area. This limits the randomization granularity to 1GB for both 4-level
705 * and 5-level paging.
706 */
707static void __init init_trampoline(void)
708{
709#ifdef CONFIG_X86_64
710 if (!kaslr_memory_enabled())
711 trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
712 else
713 init_trampoline_kaslr();
714#endif
715}
716
Tang Chen0167d7d2013-11-12 15:08:02 -0800717void __init init_mem_mapping(void)
718{
719 unsigned long end;
720
Thomas Gleixneraa8c6242017-12-04 15:07:36 +0100721 pti_check_boottime_disable();
Tang Chen0167d7d2013-11-12 15:08:02 -0800722 probe_page_size_mask();
Andy Lutomirskic7ad5ad2017-09-10 17:48:27 -0700723 setup_pcid();
Tang Chen0167d7d2013-11-12 15:08:02 -0800724
725#ifdef CONFIG_X86_64
726 end = max_pfn << PAGE_SHIFT;
727#else
728 end = max_low_pfn << PAGE_SHIFT;
729#endif
730
731 /* the ISA range is always mapped regardless of memory holes */
Logan Gunthorpec164fbb2020-04-10 14:33:24 -0700732 init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);
Tang Chen0167d7d2013-11-12 15:08:02 -0800733
Thomas Garnierb234e8a2016-06-21 17:47:01 -0700734 /* Init the trampoline, possibly with KASLR memory offset */
735 init_trampoline();
736
Tang Chenb959ed6c2013-11-12 15:08:05 -0800737 /*
738 * If the allocation is in bottom-up direction, we setup direct mapping
739 * in bottom-up, otherwise we setup direct mapping in top-down.
740 */
741 if (memblock_bottom_up()) {
742 unsigned long kernel_end = __pa_symbol(_end);
743
744 /*
 745 * We need two separate calls here. This is because we want to
 746 * allocate page tables above the kernel. So we first map
 747 * [kernel_end, end) so that memory above the kernel is mapped
 748 * as soon as possible, and then use page tables allocated above
 749 * the kernel to map [ISA_END_ADDRESS, kernel_end).
750 */
751 memory_map_bottom_up(kernel_end, end);
752 memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
753 } else {
754 memory_map_top_down(ISA_END_ADDRESS, end);
755 }
Yinghai Lu8d574702012-11-16 19:38:58 -0800756
Yinghai Luf763ad12012-11-16 19:38:57 -0800757#ifdef CONFIG_X86_64
758 if (max_pfn > max_low_pfn) {
 759 /* can we preserve max_low_pfn? */
760 max_low_pfn = max_pfn;
761 }
Yinghai Lu719272c2012-11-16 19:39:06 -0800762#else
763 early_ioremap_page_table_range_init();
H. Peter Anvin8170e6b2013-01-24 12:19:52 -0800764#endif
765
Yinghai Lu719272c2012-11-16 19:39:06 -0800766 load_cr3(swapper_pg_dir);
767 __flush_tlb_all();
Yinghai Lu719272c2012-11-16 19:39:06 -0800768
Juergen Grossf72e38e2017-11-09 14:27:35 +0100769 x86_init.hyper.init_mem_mapping();
Juergen Grossc138d812017-07-28 12:23:12 +0200770
Yinghai Luc14fa0b2012-11-16 19:38:44 -0800771 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
Yinghai Lu22ddfca2012-11-16 19:38:41 -0800772}
Pekka Enberge5b2bb52009-03-03 13:15:06 +0200773
Pekka Enberg540aca02009-03-04 11:46:40 +0200774/*
Nadav Amit4fc19702019-04-26 16:22:46 -0700775 * Initialize an mm_struct to be used during poking and a pointer to be used
776 * during patching.
777 */
778void __init poking_init(void)
779{
780 spinlock_t *ptl;
781 pte_t *ptep;
782
783 poking_mm = copy_init_mm();
784 BUG_ON(!poking_mm);
785
786 /*
787 * Randomize the poking address, but make sure that the following page
788 * will be mapped at the same PMD. We need 2 pages, so find space for 3,
789 * and adjust the address if the PMD ends after the first one.
790 */
791 poking_addr = TASK_UNMAPPED_BASE;
792 if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
793 poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
794 (TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);
795
796 if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
797 poking_addr += PAGE_SIZE;
798
799 /*
800 * We need to trigger the allocation of the page-tables that will be
801 * needed for poking now. Later, poking may be performed in an atomic
802 * section, which might cause allocation to fail.
803 */
804 ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
805 BUG_ON(!ptep);
806 pte_unmap_unlock(ptep, ptl);
807}
808
809/*
Pekka Enberg540aca02009-03-04 11:46:40 +0200810 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
811 * is valid. The argument is a physical page number.
812 *
Kees Cooka4866aa2017-04-05 09:39:08 -0700813 * On x86, access has to be given to the first megabyte of RAM because that
814 * area traditionally contains BIOS code and data regions used by X, dosemu,
815 * and similar apps. Since they map the entire memory range, the whole range
816 * must be allowed (for mapping), but any areas that would otherwise be
817 * disallowed are flagged as being "zero filled" instead of rejected.
818 * Access has to be given to non-kernel-ram areas as well, these contain the
819 * PCI mmio resources as well as potential bios/acpi data regions.
Pekka Enberg540aca02009-03-04 11:46:40 +0200820 */
821int devmem_is_allowed(unsigned long pagenr)
822{
Dan Williams2bdce742018-06-14 15:26:24 -0700823 if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
824 IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
825 != REGION_DISJOINT) {
Kees Cooka4866aa2017-04-05 09:39:08 -0700826 /*
827 * For disallowed memory regions in the low 1MB range,
828 * request that the page be shown as all zeros.
829 */
830 if (pagenr < 256)
831 return 2;
832
Pekka Enberg540aca02009-03-04 11:46:40 +0200833 return 0;
Kees Cooka4866aa2017-04-05 09:39:08 -0700834 }
835
836 /*
837 * This must follow RAM test, since System RAM is considered a
838 * restricted resource under CONFIG_STRICT_IOMEM.
839 */
840 if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
841 /* Low 1MB bypasses iomem restrictions. */
842 if (pagenr < 256)
843 return 1;
844
845 return 0;
846 }
847
848 return 1;
Pekka Enberg540aca02009-03-04 11:46:40 +0200849}
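/*
 * Example behaviour (illustrative): System RAM below 1 MiB returns 2, so the
 * page is presented to /dev/mem readers as all zeroes; System RAM above
 * 1 MiB returns 0 and the access is rejected; non-RAM areas (PCI MMIO,
 * BIOS/ACPI data) return 1 unless iomem_is_exclusive() forbids them, with
 * the low 1 MiB bypassing that check.
 */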
850
Alexey Dobriyane5cb1132018-12-28 00:36:03 -0800851void free_init_pages(const char *what, unsigned long begin, unsigned long end)
Pekka Enberge5b2bb52009-03-03 13:15:06 +0200852{
Yinghai Luc967da62010-03-28 19:42:55 -0700853 unsigned long begin_aligned, end_aligned;
Pekka Enberge5b2bb52009-03-03 13:15:06 +0200854
Yinghai Luc967da62010-03-28 19:42:55 -0700855 /* Make sure boundaries are page aligned */
856 begin_aligned = PAGE_ALIGN(begin);
857 end_aligned = end & PAGE_MASK;
858
859 if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
860 begin = begin_aligned;
861 end = end_aligned;
862 }
863
864 if (begin >= end)
Pekka Enberge5b2bb52009-03-03 13:15:06 +0200865 return;
866
867 /*
868 * If debugging page accesses then do not free this memory but
869 * mark them not present - any buggy init-section access will
870 * create a kernel page fault:
871 */
Christian Borntraegera75e1f62016-03-15 14:57:39 -0700872 if (debug_pagealloc_enabled()) {
873 pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
874 begin, end - 1);
Qian Cai0d021132019-04-23 12:58:11 -0400875 /*
876 * Inform kmemleak about the hole in the memory since the
877 * corresponding pages will be unmapped.
878 */
879 kmemleak_free_part((void *)begin, end - begin);
Christian Borntraegera75e1f62016-03-15 14:57:39 -0700880 set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
881 } else {
882 /*
 883 * We just marked the kernel text read only above; now that
 884 * we are going to free part of it, we need to make it
 885 * writable and non-executable first.
886 */
887 set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
888 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
Pekka Enberge5b2bb52009-03-03 13:15:06 +0200889
Christian Borntraegera75e1f62016-03-15 14:57:39 -0700890 free_reserved_area((void *)begin, (void *)end,
891 POISON_FREE_INITMEM, what);
892 }
Pekka Enberge5b2bb52009-03-03 13:15:06 +0200893}
894
Dave Hansen6ea27382018-08-02 15:58:29 -0700895/*
896 * begin/end can be in the direct map or the "high kernel mapping"
897 * used for the kernel image only. free_init_pages() will do the
898 * right thing for either kind of address.
899 */
Kees Cook5494c3a2019-10-29 14:13:49 -0700900void free_kernel_image_pages(const char *what, void *begin, void *end)
Dave Hansen6ea27382018-08-02 15:58:29 -0700901{
Dave Hansenc40a56a2018-08-02 15:58:31 -0700902 unsigned long begin_ul = (unsigned long)begin;
903 unsigned long end_ul = (unsigned long)end;
904 unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;
905
Kees Cook5494c3a2019-10-29 14:13:49 -0700906 free_init_pages(what, begin_ul, end_ul);
Dave Hansenc40a56a2018-08-02 15:58:31 -0700907
908 /*
909 * PTI maps some of the kernel into userspace. For performance,
910 * this includes some kernel areas that do not contain secrets.
911 * Those areas might be adjacent to the parts of the kernel image
912 * being freed, which may contain secrets. Remove the "high kernel
913 * image mapping" for these freed areas, ensuring they are not even
914 * potentially vulnerable to Meltdown regardless of the specific
915 * optimizations PTI is currently using.
916 *
917 * The "noalias" prevents unmapping the direct map alias which is
918 * needed to access the freed pages.
919 *
920 * This is only valid for 64bit kernels. 32bit has only one mapping
921 * which can't be treated in this way for obvious reasons.
922 */
923 if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
924 set_memory_np_noalias(begin_ul, len_pages);
Dave Hansen6ea27382018-08-02 15:58:29 -0700925}
926
Denys Vlasenko18278222016-09-18 20:21:25 +0200927void __ref free_initmem(void)
Pekka Enberge5b2bb52009-03-03 13:15:06 +0200928{
Ingo Molnar0c6fc112017-01-28 22:52:16 +0100929 e820__reallocate_tables();
Denys Vlasenko47533962016-09-17 23:39:26 +0200930
Brijesh Singhb3f09072018-09-14 08:45:58 -0500931 mem_encrypt_free_decrypted_mem();
932
Kees Cook5494c3a2019-10-29 14:13:49 -0700933 free_kernel_image_pages("unused kernel image (initmem)",
934 &__init_begin, &__init_end);
Pekka Enberge5b2bb52009-03-03 13:15:06 +0200935}
Pekka Enberg731ddea2009-03-04 11:13:40 +0200936
937#ifdef CONFIG_BLK_DEV_INITRD
Jan Beulich0d26d1d2012-06-18 11:30:20 +0100938void __init free_initrd_mem(unsigned long start, unsigned long end)
Pekka Enberg731ddea2009-03-04 11:13:40 +0200939{
Fenghua Yucd745be2012-12-20 23:44:31 -0800940 /*
Yinghai Luc967da62010-03-28 19:42:55 -0700941 * end may not be aligned, and we cannot align it here; the
 942 * decompressor could be confused by an aligned initrd_end.
 943 * We already reserved the trailing partial page earlier in
 944 * - i386_start_kernel()
 945 * - x86_64_start_kernel()
 946 * - relocate_initrd()
 947 * so here we can safely PAGE_ALIGN() the end to free the partial page too.
948 */
Jiang Liuc88442e2013-07-03 15:02:58 -0700949 free_init_pages("initrd", start, PAGE_ALIGN(end));
Pekka Enberg731ddea2009-03-04 11:13:40 +0200950}
951#endif
Pekka Enberg17623912011-11-01 15:58:22 +0200952
Ingo Molnar4270fd82017-01-28 12:45:40 +0100953/*
954 * Calculate the precise size of the DMA zone (first 16 MB of RAM),
955 * and pass it to the MM layer - to help it set zone watermarks more
956 * accurately.
957 *
958 * Done on 64-bit systems only for the time being, although 32-bit systems
959 * might benefit from this as well.
960 */
961void __init memblock_find_dma_reserve(void)
962{
963#ifdef CONFIG_X86_64
964 u64 nr_pages = 0, nr_free_pages = 0;
965 unsigned long start_pfn, end_pfn;
966 phys_addr_t start_addr, end_addr;
967 int i;
968 u64 u;
969
970 /*
971 * Iterate over all memory ranges (free and reserved ones alike),
972 * to calculate the total number of pages in the first 16 MB of RAM:
973 */
974 nr_pages = 0;
975 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
976 start_pfn = min(start_pfn, MAX_DMA_PFN);
977 end_pfn = min(end_pfn, MAX_DMA_PFN);
978
979 nr_pages += end_pfn - start_pfn;
980 }
981
982 /*
983 * Iterate over free memory ranges to calculate the number of free
984 * pages in the DMA zone, while not counting potential partial
985 * pages at the beginning or the end of the range:
986 */
987 nr_free_pages = 0;
988 for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
989 start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
990 end_pfn = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN);
991
992 if (start_pfn < end_pfn)
993 nr_free_pages += end_pfn - start_pfn;
994 }
995
996 set_dma_reserve(nr_pages - nr_free_pages);
997#endif
998}
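/*
 * Illustrative example (numbers made up): if 2 MiB of the first 16 MiB is
 * covered by memblock reservations (kernel image, early allocations), the
 * call above becomes set_dma_reserve(512), telling the zone watermark code
 * that 512 pages of ZONE_DMA are not actually free.
 */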
999
Pekka Enberg17623912011-11-01 15:58:22 +02001000void __init zone_sizes_init(void)
1001{
1002 unsigned long max_zone_pfns[MAX_NR_ZONES];
1003
1004 memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
1005
1006#ifdef CONFIG_ZONE_DMA
Xishi Qiuc072b902014-12-10 10:09:01 +08001007 max_zone_pfns[ZONE_DMA] = min(MAX_DMA_PFN, max_low_pfn);
Pekka Enberg17623912011-11-01 15:58:22 +02001008#endif
1009#ifdef CONFIG_ZONE_DMA32
Xishi Qiuc072b902014-12-10 10:09:01 +08001010 max_zone_pfns[ZONE_DMA32] = min(MAX_DMA32_PFN, max_low_pfn);
Pekka Enberg17623912011-11-01 15:58:22 +02001011#endif
1012 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
1013#ifdef CONFIG_HIGHMEM
1014 max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
1015#endif
1016
Mike Rapoport9691a072020-06-03 15:57:10 -07001017 free_area_init(max_zone_pfns);
Pekka Enberg17623912011-11-01 15:58:22 +02001018}
1019
Peter Zijlstra6fd166a2017-12-04 15:07:59 +01001020__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
Andy Lutomirski3d28ebc2017-05-28 10:00:15 -07001021 .loaded_mm = &init_mm,
Andy Lutomirski10af6232017-07-24 21:41:38 -07001022 .next_asid = 1,
Andy Lutomirski1e02ce42014-10-24 15:58:08 -07001023 .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
1024};
Andy Lutomirski1e02ce42014-10-24 15:58:08 -07001025
Juergen Grossbd809af2014-11-03 14:02:03 +01001026void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
1027{
1028 /* entry 0 MUST be WB (hardwired to speed up translations) */
1029 BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);
1030
1031 __cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
1032 __pte2cachemode_tbl[entry] = cache;
1033}
Andi Kleen377eeaa2018-06-13 15:48:28 -07001034
Vlastimil Babka792adb92018-08-14 20:50:47 +02001035#ifdef CONFIG_SWAP
Andi Kleen377eeaa2018-06-13 15:48:28 -07001036unsigned long max_swapfile_size(void)
1037{
1038 unsigned long pages;
1039
1040 pages = generic_max_swapfile_size();
1041
Michal Hocko5b5e4d62018-11-13 19:49:10 +01001042 if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
Andi Kleen377eeaa2018-06-13 15:48:28 -07001043 /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
Vlastimil Babkab0a182f2018-08-23 15:44:18 +02001044 unsigned long long l1tf_limit = l1tf_pfn_limit();
Vlastimil Babka1a7ed1b2018-06-21 12:36:29 +02001045 /*
1046 * We encode swap offsets also with 3 bits below those for pfn
1047 * which makes the usable limit higher.
1048 */
Vlastimil Babka0d0f6242018-06-22 17:39:33 +02001049#if CONFIG_PGTABLE_LEVELS > 2
Vlastimil Babka1a7ed1b2018-06-21 12:36:29 +02001050 l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
1051#endif
Vlastimil Babka9df95162018-08-20 11:58:35 +02001052 pages = min_t(unsigned long long, l1tf_limit, pages);
Andi Kleen377eeaa2018-06-13 15:48:28 -07001053 }
1054 return pages;
1055}
Vlastimil Babka792adb92018-08-14 20:50:47 +02001056#endif