#include <linux/gfp.h>
#include <linux/initrd.h>
#include <linux/ioport.h>
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/swapfile.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <linux/sched/task.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/page.h>
#include <asm/page_types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/proto.h>
#include <asm/dma.h>		/* for MAX_DMA_PFN */
#include <asm/microcode.h>
#include <asm/kaslr.h>
#include <asm/hypervisor.h>
#include <asm/cpufeature.h>
#include <asm/pti.h>
#include <asm/text-patching.h>
#include <asm/memtype.h>

/*
 * We need to define the tracepoints somewhere, and tlb.c
 * is only compiled when SMP=y.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>

#include "mm_internal.h"

/*
 * Tables translating between page_cache_type_t and pte encoding.
 *
 * The default values are defined statically as minimal supported mode;
 * WC and WT fall back to UC-.  pat_init() updates these values to support
 * more cache modes, WC and WT, when it is safe to do so.  See pat_init()
 * for the details.  Note, __early_ioremap() used during early boot-time
 * takes pgprot_t (pte encoding) and does not use these tables.
 *
 * Index into __cachemode2pte_tbl[] is the cachemode.
 *
 * Index into __pte2cachemode_tbl[] are the caching attribute bits of the pte
 * (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit positions 0, 1, 2.
 */
static uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
	[_PAGE_CACHE_MODE_WB      ]	= 0         | 0        ,
	[_PAGE_CACHE_MODE_WC      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC_MINUS]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_UC      ]	= _PAGE_PWT | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WT      ]	= 0         | _PAGE_PCD,
	[_PAGE_CACHE_MODE_WP      ]	= 0         | _PAGE_PCD,
};

unsigned long cachemode2protval(enum page_cache_mode pcm)
{
	if (likely(pcm == 0))
		return 0;
	return __cachemode2pte_tbl[pcm];
}
EXPORT_SYMBOL(cachemode2protval);

static uint8_t __pte2cachemode_tbl[8] = {
	[__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
	[__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
	[__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(0         | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
	[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
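
/*
 * Example with the static defaults above (i.e. before pat_init() runs):
 * cachemode2protval(_PAGE_CACHE_MODE_WC) yields _PAGE_PCD, i.e. WC is
 * degraded to UC-, and decoding _PAGE_PCD through __pte2cachemode_tbl[]
 * accordingly reports _PAGE_CACHE_MODE_UC_MINUS.
 */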

/* Check that the write-protect PAT entry is set for write-protect */
bool x86_has_pat_wp(void)
{
	return __pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] == _PAGE_CACHE_MODE_WP;
}

enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
{
	unsigned long masked;

	masked = pgprot_val(pgprot) & _PAGE_CACHE_MASK;
	if (likely(masked == 0))
		return 0;
	return __pte2cachemode_tbl[__pte2cm_idx(masked)];
}

static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
static unsigned long __initdata pgt_buf_top;

static unsigned long min_pfn_mapped;

static bool __initdata can_use_brk_pgt = true;

/*
 * Pages returned are already directly mapped.
 *
 * Changing that is likely to break Xen, see commit:
 *
 *    279b706 x86,xen: introduce x86_init.mapping.pagetable_reserve
 *
 * for detailed information.
 */
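/*
 * Depending on boot progress the pages come from the brk-based pgt_buf
 * (earliest), from memblock (once the buffer is exhausted or cannot be
 * used), or from the page allocator (once after_bootmem is set).
 */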
__ref void *alloc_low_pages(unsigned int num)
{
	unsigned long pfn;
	int i;

	if (after_bootmem) {
		unsigned int order;

		order = get_order((unsigned long)num << PAGE_SHIFT);
		return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
	}

	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
		unsigned long ret = 0;

		if (min_pfn_mapped < max_pfn_mapped) {
			ret = memblock_find_in_range(
					min_pfn_mapped << PAGE_SHIFT,
					max_pfn_mapped << PAGE_SHIFT,
					PAGE_SIZE * num, PAGE_SIZE);
		}
		if (ret)
			memblock_reserve(ret, PAGE_SIZE * num);
		else if (can_use_brk_pgt)
			ret = __pa(extend_brk(PAGE_SIZE * num, PAGE_SIZE));

		if (!ret)
			panic("alloc_low_pages: can not alloc memory");

		pfn = ret >> PAGE_SHIFT;
	} else {
		pfn = pgt_buf_end;
		pgt_buf_end += num;
	}

	for (i = 0; i < num; i++) {
		void *adr;

		adr = __va((pfn + i) << PAGE_SHIFT);
		clear_page(adr);
	}

	return __va(pfn << PAGE_SHIFT);
}

/*
 * By default we need 3 4k pages for the initial PMD_SIZE mapping and 3 4k
 * pages for the 0-ISA_END_ADDRESS range. With KASLR memory randomization,
 * we may need up to twice as many pages, depending on the machine's e820
 * memory map and the PUD alignment.
 */
#ifndef CONFIG_RANDOMIZE_MEMORY
#define INIT_PGD_PAGE_COUNT      6
#else
#define INIT_PGD_PAGE_COUNT      12
#endif
#define INIT_PGT_BUF_SIZE	(INIT_PGD_PAGE_COUNT * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
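/* Hand the brk space reserved above to the early page-table allocator. */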
void __init early_alloc_pgt_buf(void)
{
	unsigned long tables = INIT_PGT_BUF_SIZE;
	phys_addr_t base;

	base = __pa(extend_brk(tables, PAGE_SIZE));

	pgt_buf_start = base >> PAGE_SHIFT;
	pgt_buf_end = pgt_buf_start;
	pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
}

int after_bootmem;

early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);

struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

static int page_size_mask;

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags. Invoked on the boot CPU.
 */
static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

static void __init probe_page_size_mask(void)
{
	/*
	 * For pagealloc debugging, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled())
		page_size_mask |= 1 << PG_LEVEL_2M;
	else
		direct_gbpages = 0;

	/* Enable PSE if available */
	if (boot_cpu_has(X86_FEATURE_PSE))
		cr4_set_bits_and_update_boot(X86_CR4_PSE);

	/* Enable PGE if available */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (boot_cpu_has(X86_FEATURE_PGE)) {
		cr4_set_bits_and_update_boot(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	/* By default, everything is supported: */
	__default_kernel_pte_mask = __supported_pte_mask;
	/* Except with PTI, where the kernel is mostly non-Global: */
	if (cpu_feature_enabled(X86_FEATURE_PTI))
		__default_kernel_pte_mask &= ~_PAGE_GLOBAL;

	/* Enable 1 GB linear kernel mappings if available: */
	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		printk(KERN_INFO "Using GB pages for direct mapping\n");
		page_size_mask |= 1 << PG_LEVEL_1G;
	} else {
		direct_gbpages = 0;
	}
}

static void setup_pcid(void)
{
	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	if (!boot_cpu_has(X86_FEATURE_PCID))
		return;

	if (boot_cpu_has(X86_FEATURE_PGE)) {
		/*
		 * This can't be cr4_set_bits_and_update_boot() -- the
		 * trampoline code can't handle CR4.PCIDE and it wouldn't
		 * do any good anyway.  Despite the name,
		 * cr4_set_bits_and_update_boot() doesn't actually cause
		 * the bits in question to remain set all the way through
		 * the secondary boot asm.
		 *
		 * Instead, we brute-force it and set CR4.PCIDE manually in
		 * start_secondary().
		 */
		cr4_set_bits(X86_CR4_PCIDE);

		/*
		 * INVPCID's single-context modes (2/3) only work if we set
		 * X86_CR4_PCIDE, *and* we have INVPCID support.  It's unusable
		 * on systems that have X86_CR4_PCIDE clear, or that have
		 * no INVPCID support at all.
		 */
		if (boot_cpu_has(X86_FEATURE_INVPCID))
			setup_force_cpu_cap(X86_FEATURE_INVPCID_SINGLE);
	} else {
		/*
		 * flush_tlb_all(), as currently implemented, won't work if
		 * PCID is on but PGE is not.  Since that combination
		 * doesn't exist on real hardware, there's no reason to try
		 * to fully support it, but it's polite to avoid corrupting
		 * data if we're on an improperly configured VM.
		 */
		setup_clear_cpu_cap(X86_FEATURE_PCID);
	}
}

#ifdef CONFIG_X86_32
#define NR_RANGE_MR 3
#else /* CONFIG_X86_64 */
#define NR_RANGE_MR 5
#endif

static int __meminit save_mr(struct map_range *mr, int nr_range,
			     unsigned long start_pfn, unsigned long end_pfn,
			     unsigned long page_size_mask)
{
	if (start_pfn < end_pfn) {
		if (nr_range >= NR_RANGE_MR)
			panic("run out of range for init_memory_mapping\n");
		mr[nr_range].start = start_pfn<<PAGE_SHIFT;
		mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
		mr[nr_range].page_size_mask = page_size_mask;
		nr_range++;
	}

	return nr_range;
}

/*
 * Adjust the page_size_mask for small ranges so that they use a big page
 * size instead of a small one when the surrounding region is RAM as well.
 */
static void __ref adjust_range_page_size_mask(struct map_range *mr,
					      int nr_range)
{
	int i;

	for (i = 0; i < nr_range; i++) {
		if ((page_size_mask & (1<<PG_LEVEL_2M)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_2M))) {
			unsigned long start = round_down(mr[i].start, PMD_SIZE);
			unsigned long end = round_up(mr[i].end, PMD_SIZE);

#ifdef CONFIG_X86_32
			if ((end >> PAGE_SHIFT) > max_low_pfn)
				continue;
#endif

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_2M;
		}
		if ((page_size_mask & (1<<PG_LEVEL_1G)) &&
		    !(mr[i].page_size_mask & (1<<PG_LEVEL_1G))) {
			unsigned long start = round_down(mr[i].start, PUD_SIZE);
			unsigned long end = round_up(mr[i].end, PUD_SIZE);

			if (memblock_is_region_memory(start, end - start))
				mr[i].page_size_mask |= 1<<PG_LEVEL_1G;
		}
	}
}

static const char *page_size_string(struct map_range *mr)
{
	static const char str_1g[] = "1G";
	static const char str_2m[] = "2M";
	static const char str_4m[] = "4M";
	static const char str_4k[] = "4k";

	if (mr->page_size_mask & (1<<PG_LEVEL_1G))
		return str_1g;
	/*
	 * 32-bit without PAE has a 4M large page size.
	 * PG_LEVEL_2M is misnamed, but we can at least
	 * print out the right size in the string.
	 */
	if (IS_ENABLED(CONFIG_X86_32) &&
	    !IS_ENABLED(CONFIG_X86_PAE) &&
	    mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_4m;

	if (mr->page_size_mask & (1<<PG_LEVEL_2M))
		return str_2m;

	return str_4k;
}

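/*
 * Split [start, end) into map_range entries: a 4k head up to the first 2M
 * boundary, 2M pages up to the first 1G boundary, 1G pages for the aligned
 * middle (64-bit only), and 2M/4k pieces for the unaligned tail. Adjacent
 * entries that end up with the same page_size_mask are merged afterwards.
 */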
static int __meminit split_mem_range(struct map_range *mr, int nr_range,
				     unsigned long start,
				     unsigned long end)
{
	unsigned long start_pfn, end_pfn, limit_pfn;
	unsigned long pfn;
	int i;

	limit_pfn = PFN_DOWN(end);

	/* head if not big page alignment ? */
	pfn = start_pfn = PFN_DOWN(start);
#ifdef CONFIG_X86_32
	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	if (pfn == 0)
		end_pfn = PFN_DOWN(PMD_SIZE);
	else
		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#endif
	if (end_pfn > limit_pfn)
		end_pfn = limit_pfn;
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
		pfn = end_pfn;
	}

	/* big page (2M) range */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
#ifdef CONFIG_X86_32
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#else /* CONFIG_X86_64 */
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
		end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
#endif

	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}

#ifdef CONFIG_X86_64
	/* big page (1G) range */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask &
				   ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
		pfn = end_pfn;
	}

	/* tail is not big page (1G) alignment */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
	if (start_pfn < end_pfn) {
		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
				   page_size_mask & (1<<PG_LEVEL_2M));
		pfn = end_pfn;
	}
#endif

	/* tail is not big page (2M) alignment */
	start_pfn = pfn;
	end_pfn = limit_pfn;
	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

	if (!after_bootmem)
		adjust_range_page_size_mask(mr, nr_range);

	/* try to merge same page size and continuous */
	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;
		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* move it */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i--].start = old_start;
		nr_range--;
	}

	for (i = 0; i < nr_range; i++)
		pr_debug(" [mem %#010lx-%#010lx] page %s\n",
			 mr[i].start, mr[i].end - 1,
			 page_size_string(&mr[i]));

	return nr_range;
}

struct range pfn_mapped[E820_MAX_ENTRIES];
int nr_pfn_mapped;

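/* Record a newly mapped pfn range and update the max_*pfn_mapped markers. */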
static void add_pfn_range_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	nr_pfn_mapped = add_range_with_merge(pfn_mapped, E820_MAX_ENTRIES,
					     nr_pfn_mapped, start_pfn, end_pfn);
	nr_pfn_mapped = clean_sort_range(pfn_mapped, E820_MAX_ENTRIES);

	max_pfn_mapped = max(max_pfn_mapped, end_pfn);

	if (start_pfn < (1UL<<(32-PAGE_SHIFT)))
		max_low_pfn_mapped = max(max_low_pfn_mapped,
					 min(end_pfn, 1UL<<(32-PAGE_SHIFT)));
}

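/*
 * Return true if the whole [start_pfn, end_pfn) range is already covered
 * by one of the recorded direct-mapped ranges.
 */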
bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_pfn_mapped; i++)
		if ((start_pfn >= pfn_mapped[i].start) &&
		    (end_pfn <= pfn_mapped[i].end))
			return true;

	return false;
}

/*
 * Setup the direct mapping of the physical memory at PAGE_OFFSET.
 * This runs before bootmem is initialized and gets pages directly from
 * the physical memory. To access them they are temporarily mapped.
 */
unsigned long __ref init_memory_mapping(unsigned long start,
					unsigned long end, pgprot_t prot)
{
	struct map_range mr[NR_RANGE_MR];
	unsigned long ret = 0;
	int nr_range, i;

	pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
		 start, end - 1);

	memset(mr, 0, sizeof(mr));
	nr_range = split_mem_range(mr, 0, start, end);

	for (i = 0; i < nr_range; i++)
		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
						   mr[i].page_size_mask,
						   prot);

	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);

	return ret >> PAGE_SHIFT;
}

/*
 * We need to iterate through the E820 memory map and create direct mappings
 * for only E820_TYPE_RAM and E820_KERN_RESERVED regions. We cannot simply
 * create direct mappings for all pfns from [0 to max_low_pfn) and
 * [4GB to max_pfn) because of possible memory holes in high addresses
 * that cannot be marked as UC by fixed/variable range MTRRs.
 * Depending on the alignment of E820 ranges, this may possibly result
 * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
 *
 * init_mem_mapping() calls init_range_memory_mapping() with a big range;
 * that range may have holes in the middle or at the ends, and only the
 * RAM parts will be mapped by init_range_memory_mapping().
 */
static unsigned long __init init_range_memory_mapping(
					   unsigned long r_start,
					   unsigned long r_end)
{
	unsigned long start_pfn, end_pfn;
	unsigned long mapped_ram_size = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		u64 start = clamp_val(PFN_PHYS(start_pfn), r_start, r_end);
		u64 end = clamp_val(PFN_PHYS(end_pfn), r_start, r_end);
		if (start >= end)
			continue;

		/*
		 * if it is overlapping with brk pgt, we need to
		 * alloc pgt buf from memblock instead.
		 */
		can_use_brk_pgt = max(start, (u64)pgt_buf_end<<PAGE_SHIFT) >=
				  min(end, (u64)pgt_buf_top<<PAGE_SHIFT);
		init_memory_mapping(start, end, PAGE_KERNEL);
		mapped_ram_size += end - start;
		can_use_brk_pgt = true;
	}

	return mapped_ram_size;
}

static unsigned long __init get_new_step_size(unsigned long step_size)
{
	/*
	 * Initial mapped size is PMD_SIZE (2M).
	 * We can not set step_size to be PUD_SIZE (1G) yet.
	 * In the worst case, when we cross the 1G boundary and
	 * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
	 * to map a 1G range with PTEs. Hence we use one less than the
	 * difference of page table level shifts.
	 *
	 * Don't need to worry about overflow in the top-down case, on 32bit,
	 * when step_size is 0, round_down() returns 0 for start, and that
	 * turns it into 0x100000000ULL.
	 * In the bottom-up case, round_up(x, 0) returns 0 though too, which
	 * needs to be taken into consideration by the code below.
	 */
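	/*
	 * For example, on 64-bit (PMD_SHIFT == 21, PAGE_SHIFT == 12) the
	 * shift below is 8, so each call grows the step size by a factor
	 * of 256: 2M -> 512M -> 128G -> ...
	 */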
	return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}

/**
 * memory_map_top_down - Map [map_start, map_end) top down
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in top-down fashion. That is, the page tables
 * will be allocated at the end of the memory, and we map the
 * memory top-down.
 */
static void __init memory_map_top_down(unsigned long map_start,
				       unsigned long map_end)
{
	unsigned long real_end, last_start;
	unsigned long step_size;
	unsigned long addr;
	unsigned long mapped_ram_size = 0;

	/* Xen has a big reserved range near the end of RAM; skip it at first. */
	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
	real_end = addr + PMD_SIZE;

	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	step_size = PMD_SIZE;
	max_pfn_mapped = 0; /* will get exact value next */
	min_pfn_mapped = real_end >> PAGE_SHIFT;
	last_start = real_end;

	/*
	 * We start from the top (end of memory) and go to the bottom.
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (last_start > map_start) {
		unsigned long start;

		if (last_start > step_size) {
			start = round_down(last_start - 1, step_size);
			if (start < map_start)
				start = map_start;
		} else
			start = map_start;
		mapped_ram_size += init_range_memory_mapping(start,
							     last_start);
		last_start = start;
		min_pfn_mapped = last_start >> PAGE_SHIFT;
		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}

	if (real_end < map_end)
		init_range_memory_mapping(real_end, map_end);
}

/**
 * memory_map_bottom_up - Map [map_start, map_end) bottom up
 * @map_start: start address of the target memory range
 * @map_end: end address of the target memory range
 *
 * This function will setup direct mapping for memory range
 * [map_start, map_end) in bottom-up fashion. Since we have limited the
 * bottom-up allocation above the kernel, the page tables will
 * be allocated just above the kernel and we map the memory
 * in [map_start, map_end) bottom-up.
 */
static void __init memory_map_bottom_up(unsigned long map_start,
					unsigned long map_end)
{
	unsigned long next, start;
	unsigned long mapped_ram_size = 0;
	/* step_size needs to be small so the pgt_buf from BRK can cover it */
	unsigned long step_size = PMD_SIZE;

	start = map_start;
	min_pfn_mapped = start >> PAGE_SHIFT;

	/*
	 * We start from the bottom (@map_start) and go to the top (@map_end).
	 * The memblock_find_in_range() gets us a block of RAM from the
	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
	 * for page table.
	 */
	while (start < map_end) {
		if (step_size && map_end - start > step_size) {
			next = round_up(start + 1, step_size);
			if (next > map_end)
				next = map_end;
		} else {
			next = map_end;
		}

		mapped_ram_size += init_range_memory_mapping(start, next);
		start = next;

		if (mapped_ram_size >= step_size)
			step_size = get_new_step_size(step_size);
	}
}

/*
 * The real mode trampoline, which is required for bootstrapping CPUs,
 * occupies only a small area under the low 1MB. See reserve_real_mode()
 * for details.
 *
 * If KASLR is disabled the first PGD entry of the direct mapping is copied
 * to map the real mode trampoline.
 *
 * If KASLR is enabled, copy only the PUD which covers the low 1MB
 * area. This limits the randomization granularity to 1GB for both 4-level
 * and 5-level paging.
 */
static void __init init_trampoline(void)
{
#ifdef CONFIG_X86_64
	if (!kaslr_memory_enabled())
		trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
	else
		init_trampoline_kaslr();
#endif
}

void __init init_mem_mapping(void)
{
	unsigned long end;

	pti_check_boottime_disable();
	probe_page_size_mask();
	setup_pcid();

#ifdef CONFIG_X86_64
	end = max_pfn << PAGE_SHIFT;
#else
	end = max_low_pfn << PAGE_SHIFT;
#endif

	/* the ISA range is always mapped regardless of memory holes */
	init_memory_mapping(0, ISA_END_ADDRESS, PAGE_KERNEL);

	/* Init the trampoline, possibly with KASLR memory offset */
	init_trampoline();

	/*
	 * If the allocation is in bottom-up direction, we setup direct mapping
	 * in bottom-up, otherwise we setup direct mapping in top-down.
	 */
	if (memblock_bottom_up()) {
		unsigned long kernel_end = __pa_symbol(_end);

		/*
		 * We need two separate calls here. This is because we want to
		 * allocate page tables above the kernel. So we first map
		 * [kernel_end, end) to make memory above the kernel be mapped
		 * as soon as possible. And then use page tables allocated above
		 * the kernel to map [ISA_END_ADDRESS, kernel_end).
		 */
		memory_map_bottom_up(kernel_end, end);
		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
	} else {
		memory_map_top_down(ISA_END_ADDRESS, end);
	}

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		/* can we preserve max_low_pfn ? */
		max_low_pfn = max_pfn;
	}
#else
	early_ioremap_page_table_range_init();
#endif

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();

	x86_init.hyper.init_mem_mapping();

	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
}

/*
 * Initialize an mm_struct to be used during poking and a pointer to be used
 * during patching.
 */
void __init poking_init(void)
{
	spinlock_t *ptl;
	pte_t *ptep;

	poking_mm = copy_init_mm();
	BUG_ON(!poking_mm);

	/*
	 * Randomize the poking address, but make sure that the following page
	 * will be mapped at the same PMD. We need 2 pages, so find space for 3,
	 * and adjust the address if the PMD ends after the first one.
	 */
	poking_addr = TASK_UNMAPPED_BASE;
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		poking_addr += (kaslr_get_random_long("Poking") & PAGE_MASK) %
			(TASK_SIZE - TASK_UNMAPPED_BASE - 3 * PAGE_SIZE);

	if (((poking_addr + PAGE_SIZE) & ~PMD_MASK) == 0)
		poking_addr += PAGE_SIZE;

	/*
	 * We need to trigger the allocation of the page-tables that will be
	 * needed for poking now. Later, poking may be performed in an atomic
	 * section, which might cause allocation to fail.
	 */
	ptep = get_locked_pte(poking_mm, poking_addr, &ptl);
	BUG_ON(!ptep);
	pte_unmap_unlock(ptep, ptl);
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area traditionally contains BIOS code and data regions used by X, dosemu,
 * and similar apps. Since they map the entire memory range, the whole range
 * must be allowed (for mapping), but any areas that would otherwise be
 * disallowed are flagged as being "zero filled" instead of rejected.
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
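/*
 * Return value: 0 - access denied, 1 - access allowed,
 * 2 - access allowed, but the disallowed low-1MB page is presented as all
 *     zeroes (the "zero filled" case described above).
 */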
int devmem_is_allowed(unsigned long pagenr)
{
	if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
			      IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
			!= REGION_DISJOINT) {
		/*
		 * For disallowed memory regions in the low 1MB range,
		 * request that the page be shown as all zeros.
		 */
		if (pagenr < 256)
			return 2;

		return 0;
	}

	/*
	 * This must follow RAM test, since System RAM is considered a
	 * restricted resource under CONFIG_STRICT_IOMEM.
	 */
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
		/* Low 1MB bypasses iomem restrictions. */
		if (pagenr < 256)
			return 1;

		return 0;
	}

	return 1;
}

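/*
 * Free (or, with page-alloc debugging, just unmap) the init pages in the
 * page-aligned range [begin, end).
 */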
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long begin_aligned, end_aligned;

	/* Make sure boundaries are page aligned */
	begin_aligned = PAGE_ALIGN(begin);
	end_aligned   = end & PAGE_MASK;

	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
		begin = begin_aligned;
		end   = end_aligned;
	}

	if (begin >= end)
		return;

	/*
	 * If debugging page accesses then do not free this memory but
	 * mark them not present - any buggy init-section access will
	 * create a kernel page fault:
	 */
	if (debug_pagealloc_enabled()) {
		pr_info("debug: unmapping init [mem %#010lx-%#010lx]\n",
			begin, end - 1);
		/*
		 * Inform kmemleak about the hole in the memory since the
		 * corresponding pages will be unmapped.
		 */
		kmemleak_free_part((void *)begin, end - begin);
		set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
	} else {
		/*
		 * We just marked the kernel text read only above, now that
		 * we are going to free part of that, we need to make that
		 * writeable and non-executable first.
		 */
		set_memory_nx(begin, (end - begin) >> PAGE_SHIFT);
		set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

		free_reserved_area((void *)begin, (void *)end,
				   POISON_FREE_INITMEM, what);
	}
}

/*
 * begin/end can be in the direct map or the "high kernel mapping"
 * used for the kernel image only.  free_init_pages() will do the
 * right thing for either kind of address.
 */
void free_kernel_image_pages(const char *what, void *begin, void *end)
{
	unsigned long begin_ul = (unsigned long)begin;
	unsigned long end_ul = (unsigned long)end;
	unsigned long len_pages = (end_ul - begin_ul) >> PAGE_SHIFT;

	free_init_pages(what, begin_ul, end_ul);

	/*
	 * PTI maps some of the kernel into userspace.  For performance,
	 * this includes some kernel areas that do not contain secrets.
	 * Those areas might be adjacent to the parts of the kernel image
	 * being freed, which may contain secrets.  Remove the "high kernel
	 * image mapping" for these freed areas, ensuring they are not even
	 * potentially vulnerable to Meltdown regardless of the specific
	 * optimizations PTI is currently using.
	 *
	 * The "noalias" prevents unmapping the direct map alias which is
	 * needed to access the freed pages.
	 *
	 * This is only valid for 64bit kernels. 32bit has only one mapping
	 * which can't be treated in this way for obvious reasons.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && cpu_feature_enabled(X86_FEATURE_PTI))
		set_memory_np_noalias(begin_ul, len_pages);
}

void __ref free_initmem(void)
{
	e820__reallocate_tables();

	mem_encrypt_free_decrypted_mem();

	free_kernel_image_pages("unused kernel image (initmem)",
				&__init_begin, &__init_end);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	/*
	 * end could be unaligned, and we cannot align it here because the
	 * decompressor could be confused by an aligned initrd_end. The end
	 * partial page was already reserved in
	 *   - i386_start_kernel()
	 *   - x86_64_start_kernel()
	 *   - relocate_initrd()
	 * so PAGE_ALIGN() is safe here and frees the partial page as well.
	 */
	free_init_pages("initrd", start, PAGE_ALIGN(end));
}
#endif

/*
 * Calculate the precise size of the DMA zone (first 16 MB of RAM),
 * and pass it to the MM layer - to help it set zone watermarks more
 * accurately.
 *
 * Done on 64-bit systems only for the time being, although 32-bit systems
 * might benefit from this as well.
 */
void __init memblock_find_dma_reserve(void)
{
#ifdef CONFIG_X86_64
	u64 nr_pages = 0, nr_free_pages = 0;
	unsigned long start_pfn, end_pfn;
	phys_addr_t start_addr, end_addr;
	int i;
	u64 u;

	/*
	 * Iterate over all memory ranges (free and reserved ones alike),
	 * to calculate the total number of pages in the first 16 MB of RAM:
	 */
	nr_pages = 0;
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
		start_pfn = min(start_pfn, MAX_DMA_PFN);
		end_pfn   = min(end_pfn,   MAX_DMA_PFN);

		nr_pages += end_pfn - start_pfn;
	}

	/*
	 * Iterate over free memory ranges to calculate the number of free
	 * pages in the DMA zone, while not counting potential partial
	 * pages at the beginning or the end of the range:
	 */
	nr_free_pages = 0;
	for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start_addr, &end_addr, NULL) {
		start_pfn = min_t(unsigned long, PFN_UP(start_addr), MAX_DMA_PFN);
		end_pfn   = min_t(unsigned long, PFN_DOWN(end_addr), MAX_DMA_PFN);

		if (start_pfn < end_pfn)
			nr_free_pages += end_pfn - start_pfn;
	}

	set_dma_reserve(nr_pages - nr_free_pages);
#endif
}

void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]		= min(MAX_DMA_PFN, max_low_pfn);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32]	= min(MAX_DMA32_PFN, max_low_pfn);
#endif
	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM]	= max_pfn;
#endif

	free_area_init(max_zone_pfns);
}

__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
	.loaded_mm = &init_mm,
	.next_asid = 1,
	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
};

void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
	/* entry 0 MUST be WB (hardwired to speed up translations) */
	BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);

	__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
	__pte2cachemode_tbl[entry] = cache;
}

#ifdef CONFIG_SWAP
unsigned long max_swapfile_size(void)
{
	unsigned long pages;

	pages = generic_max_swapfile_size();

	if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
		unsigned long long l1tf_limit = l1tf_pfn_limit();
		/*
		 * We encode swap offsets also with 3 bits below those for pfn
		 * which makes the usable limit higher.
		 */
#if CONFIG_PGTABLE_LEVELS > 2
		l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
#endif
		pages = min_t(unsigned long long, l1tf_limit, pages);
	}
	return pages;
}
#endif