// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
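/*
 * Clear the given bits in the cached copy of the CP15 control register
 * (cr_alignment) and return the updated value.
 */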
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
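/*
 * Legacy ATAG support: ATAG_INITRD passes the initrd location as a
 * virtual address (hence the __virt_to_phys() conversion below), while
 * ATAG_INITRD2 passes a physical address directly.
 */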
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

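/*
 * Derive the page frame number limits from the memblock layout:
 * min/max_low bound the directly-mapped (lowmem) region, max_high is
 * the last page of RAM, highmem included.
 */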
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

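/*
 * Carve the first dma_size pages out of zone 0 so that ZONE_DMA covers
 * exactly the DMA-capable region and ZONE_NORMAL gets the remainder,
 * with any holes staying accounted against ZONE_NORMAL.
 */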
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

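/*
 * Configure the DMA limits from the machine descriptor: platforms that
 * declare dma_zone_size get a ZONE_DMA of that size starting at
 * PHYS_OFFSET; everything else allows DMA anywhere in the 32-bit space.
 */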
void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

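/*
 * Build the zone_size[]/zhole_size[] arrays describing lowmem (and, if
 * configured, highmem and DMA memory) and hand them to
 * free_area_init_node() to initialise the zone structures for node 0.
 */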
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
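/*
 * A pfn is valid only if it survives a round trip through the
 * pfn <-> phys conversion (i.e. it isn't truncated) and the resulting
 * address falls within a mapped memblock memory region.
 */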
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);

	if (__phys_to_pfn(addr) != pfn)
		return 0;

	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
#endif

static bool arm_memblock_steal_permitted = true;

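/*
 * Permanently remove a block of memory from the system memory map.
 * The region is allocated, then both freed and removed from memblock,
 * so the kernel will never create a linear mapping or memmap for it.
 * Only permitted before arm_memblock_init() completes (BUG otherwise).
 */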
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

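/*
 * Validate the initrd location handed over by the bootloader and
 * reserve it in memblock, then record the virtual address range for
 * the initrd core code. On any inconsistency the initrd is disabled.
 */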
static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
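/*
 * Read this CPU's cache type register (CTR) and keep icache_size at the
 * smallest I-cache line size seen so far, warning when a secondary CPU
 * disagrees with the boot CPU.
 */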
void check_cpu_icache_size(int cpuid)
{
	u32 size, ctr;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

	size = 1 << ((ctr & 0xf) + 2);
	if (cpuid != 0 && icache_size != size)
		pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
			cpuid);
	if (icache_size > size)
		icache_size = size;
}
#endif

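/*
 * Set up all boot-time memblock reservations: kernel image, initrd,
 * architecture page tables, platform-specific regions, the FDT itself,
 * and the CMA area. After this, arm_memblock_steal() is disallowed.
 */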
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

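/*
 * Second stage of memory initialisation: establish the pfn limits, run
 * the early memory test, tell sparsemem which sections exist, and
 * finally size and initialise the zones.
 */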
void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	memblocks_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

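/*
 * Free the struct page entries backing [start_pfn, end_pfn). The range
 * is rounded inwards to whole pages of memmap so that entries shared
 * with a neighbouring region are preserved.
 */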
static inline void __init
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

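/*
 * Release every highmem page that is neither reserved nor NOMAP to the
 * buddy allocator, walking the memblock memory list and punching out
 * the reserved regions that overlap each entry.
 */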
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
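	/*
	 * With LPAE there may be RAM above the 32-bit boundary, so a
	 * bounce buffer is needed for devices with narrower DMA masks.
	 */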
	swiotlb_init(1);
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
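/*
 * Describes one kernel region whose section (PMD) permissions are
 * toggled at runtime: mask selects the bits preserved in each entry,
 * then prot is ORed in to apply the restriction and clear to lift it.
 */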
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make pages tables, etc before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. It is only
 * safe to call this with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

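/*
 * Walk a section_perm table and apply (set) or revert (clear) each
 * entry's permissions, one section at a time, in the given mm. Entries
 * that aren't section-aligned are reported and skipped.
 */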
static void set_section_perms(struct section_perm *perms, int n, bool set,
			      struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework, executed by a single CPU while all other
 * CPUs spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			if (s->mm)
				set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

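/*
 * Called once boot is complete: lock down kernel memory permissions,
 * poison the .init sections and, except on the Integrator/CIntegrator
 * boards, release the init pages to the page allocator.
 */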
void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
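/*
 * When the whole initrd is being freed, round out to page boundaries to
 * match the page-granular reservation made in arm_initrd_init().
 */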
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif