// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include "../kernel/head.h"

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
void *dtb_early_va;

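/*
 * Populate the zone boundaries: ZONE_DMA32 (when configured) covers the
 * first 4 GiB of RAM, and ZONE_NORMAL covers everything up to max_low_pfn.
 */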
static void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
                        (unsigned long) PFN_PHYS(max_low_pfn)));
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

        free_area_init_nodes(max_zone_pfns);
}

static void setup_zero_page(void)
{
        memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

        high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
        memblock_free_all();

        mem_init_print_info(NULL);
}

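/*
 * Validate the initrd image passed in by the bootloader and reserve its
 * pages in memblock so they are not handed to the buddy allocator.
 */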
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
        unsigned long size;

        if (initrd_start >= initrd_end) {
                pr_info("initrd not found or empty");
                goto disable;
        }
        if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
                pr_err("initrd extends beyond end of memory");
                goto disable;
        }

        size = initrd_end - initrd_start;
        memblock_reserve(__pa(initrd_start), size);
        initrd_below_start_ok = 1;

        pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
                (void *)(initrd_start), size);
        return;
disable:
        pr_cont(" - disabling initrd\n");
        initrd_start = 0;
        initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */

static phys_addr_t dtb_early_pa __initdata;

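/*
 * Set up the memblock-based boot allocator: trim memory that the kernel
 * mapping cannot reach, reserve the kernel image, the initrd and the FDT,
 * and register the remaining RAM.
 */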
void __init setup_bootmem(void)
{
        struct memblock_region *reg;
        phys_addr_t mem_size = 0;
        phys_addr_t vmlinux_end = __pa(&_end);
        phys_addr_t vmlinux_start = __pa(&_start);

        /* Find the memory region containing the kernel */
        for_each_memblock(memory, reg) {
                phys_addr_t end = reg->base + reg->size;

                if (reg->base <= vmlinux_end && vmlinux_end <= end) {
                        mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);

                        /*
                         * Remove the memblock from the end of the usable
                         * area to the end of the region.
                         */
                        if (reg->base + mem_size < end)
                                memblock_remove(reg->base + mem_size,
                                                end - reg->base - mem_size);
                }
        }
        BUG_ON(mem_size == 0);

        /* Reserve from the start of the kernel to the end of the kernel */
        memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

        set_max_mapnr(PFN_DOWN(mem_size));
        max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());

#ifdef CONFIG_BLK_DEV_INITRD
        setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

        /*
         * Avoid using early_init_fdt_reserve_self() since __pa() does
         * not work for DTB pointers that are fixmap addresses
         */
        memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

        early_init_fdt_scan_reserved_mem();
        memblock_allow_resize();
        memblock_dump_all();

        for_each_memblock(memory, reg) {
                unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
                unsigned long end_pfn = memblock_region_memory_end_pfn(reg);

                memblock_set_node(PFN_PHYS(start_pfn),
                                  PFN_PHYS(end_pfn - start_pfn),
                                  &memblock.memory, 0);
        }
}

#ifdef CONFIG_MMU
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
static bool mmu_enabled;

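/*
 * Upper bound on the size of the early kernel mapping created by
 * setup_vm(); setup_vm() BUGs if the kernel image exceeds it.
 */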
#define MAX_EARLY_MAPPING_SIZE SZ_128M

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

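/*
 * Install (or, for an empty prot, tear down) the translation for one
 * fixmap slot, flushing the TLB entry on teardown.
 */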
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *ptep;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        ptep = &fixmap_pte[pte_index(addr)];

        if (pgprot_val(prot)) {
                set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
        } else {
                pte_clear(&init_mm, addr, ptep);
                local_flush_tlb_page(addr);
        }
}

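/*
 * Early page tables are walked by physical address while the MMU is
 * still off; once it is on, table pages must be accessed through a
 * temporary fixmap window instead.
 */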
static pte_t *__init get_pte_virt(phys_addr_t pa)
{
        if (mmu_enabled) {
                clear_fixmap(FIX_PTE);
                return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
        } else {
                return (pte_t *)((uintptr_t)pa);
        }
}

static phys_addr_t __init alloc_pte(uintptr_t va)
{
        /*
         * We only create PMD or PGD early mappings so we
         * should never reach here with MMU disabled.
         */
        BUG_ON(!mmu_enabled);

        return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static void __init create_pte_mapping(pte_t *ptep,
                                      uintptr_t va, phys_addr_t pa,
                                      phys_addr_t sz, pgprot_t prot)
{
        uintptr_t pte_index = pte_index(va);

        BUG_ON(sz != PAGE_SIZE);

        if (pte_none(ptep[pte_index]))
                ptep[pte_index] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

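/*
 * Number of statically allocated PMD pages available before the MMU
 * (and thus memblock allocation) is usable: one for the kernel mapping,
 * plus extras when MAX_EARLY_MAPPING_SIZE spans several PGDIR entries.
 */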
#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
#define NUM_EARLY_PMDS 1UL
#else
#define NUM_EARLY_PMDS (1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt(phys_addr_t pa)
{
        if (mmu_enabled) {
                clear_fixmap(FIX_PMD);
                return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
        } else {
                return (pmd_t *)((uintptr_t)pa);
        }
}

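/*
 * Before the MMU is on, hand out PMDs from the static early_pmd array,
 * indexed by which PGDIR entry the VA falls in; afterwards, allocate
 * them from memblock.
 */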
static phys_addr_t __init alloc_pmd(uintptr_t va)
{
        uintptr_t pmd_num;

        if (mmu_enabled)
                return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

        pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
        BUG_ON(pmd_num >= NUM_EARLY_PMDS);
        return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
}

static void __init create_pmd_mapping(pmd_t *pmdp,
                                      uintptr_t va, phys_addr_t pa,
                                      phys_addr_t sz, pgprot_t prot)
{
        pte_t *ptep;
        phys_addr_t pte_phys;
        uintptr_t pmd_index = pmd_index(va);

        if (sz == PMD_SIZE) {
                if (pmd_none(pmdp[pmd_index]))
                        pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pa), prot);
                return;
        }

        if (pmd_none(pmdp[pmd_index])) {
                pte_phys = alloc_pte(va);
                pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
                ptep = get_pte_virt(pte_phys);
                memset(ptep, 0, PAGE_SIZE);
        } else {
                pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_index]));
                ptep = get_pte_virt(pte_phys);
        }

        create_pte_mapping(ptep, va, pa, sz, prot);
}

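/*
 * Hide the difference between two- and three-level page tables: with a
 * real PMD level (e.g. RV64/Sv39) the PGD's next level is a PMD, while
 * with __PAGETABLE_PMD_FOLDED (RV32) the PGD points straight at PTEs.
 */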
#define pgd_next_t pmd_t
#define alloc_pgd_next(__va) alloc_pmd(__va)
#define get_pgd_next_virt(__pa) get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)      \
        create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define PTE_PARENT_SIZE PMD_SIZE
#define fixmap_pgd_next fixmap_pmd
#else
#define pgd_next_t pte_t
#define alloc_pgd_next(__va) alloc_pte(__va)
#define get_pgd_next_virt(__pa) get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)      \
        create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define PTE_PARENT_SIZE PGDIR_SIZE
#define fixmap_pgd_next fixmap_pte
#endif

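/*
 * Map va -> pa at the PGD level. A PGDIR_SIZE request becomes a leaf
 * entry; anything smaller allocates (or reuses) a next-level table and
 * recurses into it.
 */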
static void __init create_pgd_mapping(pgd_t *pgdp,
                                      uintptr_t va, phys_addr_t pa,
                                      phys_addr_t sz, pgprot_t prot)
{
        pgd_next_t *nextp;
        phys_addr_t next_phys;
        uintptr_t pgd_index = pgd_index(va);

        if (sz == PGDIR_SIZE) {
                if (pgd_val(pgdp[pgd_index]) == 0)
                        pgdp[pgd_index] = pfn_pgd(PFN_DOWN(pa), prot);
                return;
        }

        if (pgd_val(pgdp[pgd_index]) == 0) {
                next_phys = alloc_pgd_next(va);
                pgdp[pgd_index] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
                nextp = get_pgd_next_virt(next_phys);
                memset(nextp, 0, PAGE_SIZE);
        } else {
                next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_index]));
                nextp = get_pgd_next_virt(next_phys);
        }

        create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

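/*
 * Choose the largest mapping granularity that both the base and the
 * size are aligned to; unaligned regions fall back to PAGE_SIZE.
 */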
static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
        uintptr_t map_size = PAGE_SIZE;

        /* Upgrade to PMD/PGDIR mappings whenever possible */
        if (!(base & (PTE_PARENT_SIZE - 1)) &&
            !(size & (PTE_PARENT_SIZE - 1)))
                map_size = PTE_PARENT_SIZE;

        return map_size;
}

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements must be honoured for setup_vm() to work
 * correctly:
 * 1) It must use PC-relative addressing to access kernel symbols.
 *    To achieve this we always build with GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE does not work for
 *    setup_vm(), so compiler instrumentation is disabled when FTRACE
 *    is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

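/*
 * Build the early page tables: trampoline_pg_dir maps only the kernel
 * image (used by head.S for the initial satp switch), while
 * early_pg_dir additionally maps the fixmap region so the FDT can be
 * parsed before setup_vm_final() runs.
 */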
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
        uintptr_t va, end_va;
        uintptr_t load_pa = (uintptr_t)(&_start);
        uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
        uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);

        va_pa_offset = PAGE_OFFSET - load_pa;
        pfn_base = PFN_DOWN(load_pa);

        /*
         * Enforce boot alignment requirements of RV32 and
         * RV64 by only allowing PMD or PGD mappings.
         */
        BUG_ON(map_size == PAGE_SIZE);

        /* Sanity check alignment and size */
        BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
        BUG_ON((load_pa % map_size) != 0);
        BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);

        /* Setup early PGD for fixmap */
        create_pgd_mapping(early_pg_dir, FIXADDR_START,
                           (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
        /* Setup fixmap PMD */
        create_pmd_mapping(fixmap_pmd, FIXADDR_START,
                           (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
        /* Setup trampoline PGD and PMD */
        create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
                           (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
        create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
                           load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
        /* Setup trampoline PGD */
        create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
                           load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

        /*
         * Setup early PGD covering the entire kernel, which allows
         * us to reach paging_init(). We map all memory banks later
         * in setup_vm_final() below.
         */
        end_va = PAGE_OFFSET + load_sz;
        for (va = PAGE_OFFSET; va < end_va; va += map_size)
                create_pgd_mapping(early_pg_dir, va,
                                   load_pa + (va - PAGE_OFFSET),
                                   map_size, PAGE_KERNEL_EXEC);

        /* Create fixed mapping for early FDT parsing */
        end_va = __fix_to_virt(FIX_FDT) + FIX_FDT_SIZE;
        for (va = __fix_to_virt(FIX_FDT); va < end_va; va += PAGE_SIZE)
                create_pte_mapping(fixmap_pte, va,
                                   dtb_pa + (va - __fix_to_virt(FIX_FDT)),
                                   PAGE_SIZE, PAGE_KERNEL);

        /* Save pointer to DTB for early FDT parsing */
        dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
        /* Save physical address for memblock reservation */
        dtb_early_pa = dtb_pa;
}

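/*
 * Build the final page tables in swapper_pg_dir now that memblock is
 * initialized: map every memory bank, drop the temporary fixmap
 * windows, and point satp at the swapper page table.
 */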
static void __init setup_vm_final(void)
{
        uintptr_t va, map_size;
        phys_addr_t pa, start, end;
        struct memblock_region *reg;

        /* Set mmu_enabled flag */
        mmu_enabled = true;

        /* Setup swapper PGD for fixmap */
        create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
                           __pa(fixmap_pgd_next),
                           PGDIR_SIZE, PAGE_TABLE);

        /* Map all memory banks */
        for_each_memblock(memory, reg) {
                start = reg->base;
                end = start + reg->size;

                if (start >= end)
                        break;
                if (memblock_is_nomap(reg))
                        continue;
                if (start <= __pa(PAGE_OFFSET) &&
                    __pa(PAGE_OFFSET) < end)
                        start = __pa(PAGE_OFFSET);

                map_size = best_map_size(start, end - start);
                for (pa = start; pa < end; pa += map_size) {
                        va = (uintptr_t)__va(pa);
                        create_pgd_mapping(swapper_pg_dir, va, pa,
                                           map_size, PAGE_KERNEL_EXEC);
                }
        }

        /* Clear fixmap PTE and PMD mappings */
        clear_fixmap(FIX_PTE);
        clear_fixmap(FIX_PMD);

        /* Move to swapper page table */
        csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
        local_flush_tlb_all();
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
        dtb_early_va = (void *)dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

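/* Finalize the kernel page tables, then set up sparsemem and the zones. */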
void __init paging_init(void)
{
        setup_vm_final();
        memblocks_present();
        sparse_init();
        setup_zero_page();
        zone_sizes_init();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
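/* The virtual memmap is backed by base pages only; no huge-page mappings. */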
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
{
        return vmemmap_populate_basepages(start, end, node);
}
#endif