// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/io.h>

#include "../kernel/head.h"

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];

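/*
 * Report the highest page frame each zone may use to the core mm:
 * ZONE_DMA32 (when enabled) is capped at 4GiB, ZONE_NORMAL ends at
 * max_low_pfn.
 */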
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
			(unsigned long) PFN_PHYS(max_low_pfn)));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init_nodes(max_zone_pfns);
}

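/* Zero the shared page that backs all read-only zero mappings. */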
void setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

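/*
 * Hand all free memblock memory over to the buddy allocator and print
 * the boot-time memory banner.
 */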
void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
}

#ifdef CONFIG_BLK_DEV_INITRD
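/*
 * Validate the initrd range handed over by the bootloader and reserve it
 * in memblock so it is not given to the page allocator.
 */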
static void __init setup_initrd(void)
{
	unsigned long size;

	if (initrd_start >= initrd_end) {
		pr_info("initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		pr_err("initrd extends beyond end of memory");
		goto disable;
	}

	size = initrd_end - initrd_start;
	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */

static phys_addr_t dtb_early_pa __initdata;

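/*
 * Trim the memory region containing the kernel to what the linear mapping
 * can cover, reserve the kernel image and the DTB, and finish early
 * memblock initialisation.
 */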
void __init setup_bootmem(void)
{
	struct memblock_region *reg;
	phys_addr_t mem_size = 0;
	phys_addr_t vmlinux_end = __pa(&_end);
	phys_addr_t vmlinux_start = __pa(&_start);

	/* Find the memory region containing the kernel */
	for_each_memblock(memory, reg) {
		phys_addr_t end = reg->base + reg->size;

		if (reg->base <= vmlinux_end && vmlinux_end <= end) {
			mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);

			/*
			 * Remove memblock from the end of usable area to the
			 * end of region
			 */
			if (reg->base + mem_size < end)
				memblock_remove(reg->base + mem_size,
						end - reg->base - mem_size);
		}
	}
	BUG_ON(mem_size == 0);

	/* Reserve from the start of the kernel to the end of the kernel */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	set_max_mapnr(PFN_DOWN(mem_size));
	max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

	/*
	 * Avoid using early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses
	 */
	memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	memblock_allow_resize();
	memblock_dump_all();

	for_each_memblock(memory, reg) {
		unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
		unsigned long end_pfn = memblock_region_memory_end_pfn(reg);

		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, 0);
	}
}

unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

void *dtb_early_va;
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
static bool mmu_enabled;

#define MAX_EARLY_MAPPING_SIZE	SZ_128M

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

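/* Install or tear down a single fixmap slot in fixmap_pte. */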
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	} else {
		pte_clear(&init_mm, addr, ptep);
		local_flush_tlb_page(addr);
	}
}

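/*
 * Early page-table helpers: while the MMU is still off, table physical
 * addresses are used directly; once it is on, the tables are reached
 * through the fixmap.
 */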
static pte_t *__init get_pte_virt(phys_addr_t pa)
{
	if (mmu_enabled) {
		clear_fixmap(FIX_PTE);
		return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
	} else {
		return (pte_t *)((uintptr_t)pa);
	}
}

static phys_addr_t __init alloc_pte(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG_ON(!mmu_enabled);

	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

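/* Install a single PTE (if none is present yet); only PAGE_SIZE is valid here. */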
static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_index = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_index]))
		ptep[pte_index] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
#define NUM_EARLY_PMDS		1UL
#else
#define NUM_EARLY_PMDS		(1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt(phys_addr_t pa)
{
	if (mmu_enabled) {
		clear_fixmap(FIX_PMD);
		return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
	} else {
		return (pmd_t *)((uintptr_t)pa);
	}
}

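/*
 * Before the MMU is on, PMD tables come from the statically allocated
 * early_pmd[] pool; afterwards they are allocated from memblock.
 */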
static phys_addr_t __init alloc_pmd(uintptr_t va)
{
	uintptr_t pmd_num;

	if (mmu_enabled)
		return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);

	pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
	BUG_ON(pmd_num >= NUM_EARLY_PMDS);
	return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
}

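/*
 * Install a PMD_SIZE leaf mapping, or descend to (allocating if needed)
 * the PTE level for smaller sizes.
 */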
static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_index = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_index]))
			pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_index])) {
		pte_phys = alloc_pte(va);
		pmdp[pmd_index] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_index]));
		ptep = get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	alloc_pte(__va)
#define get_pgd_next_virt(__pa)	get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif

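/*
 * Top-level mapping helper: install a PGDIR_SIZE leaf entry or descend to
 * the next level (PMD or PTE, depending on the page-table layout).
 */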
static void __init create_pgd_mapping(pgd_t *pgdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_index = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_index]) == 0)
			pgdp[pgd_index] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_index]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_index] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_index]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements must be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm(),
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t va, end_va;
	uintptr_t load_pa = (uintptr_t)(&_start);
	uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
	uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);

	va_pa_offset = PAGE_OFFSET - load_pa;
	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	BUG_ON(map_size == PAGE_SIZE);

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);
	BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);

	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup early PGD covering the entire kernel, which allows us to
	 * reach paging_init(). We map all memory banks later in
	 * setup_vm_final() below.
	 */
	end_va = PAGE_OFFSET + load_sz;
	for (va = PAGE_OFFSET; va < end_va; va += map_size)
		create_pgd_mapping(early_pg_dir, va,
				   load_pa + (va - PAGE_OFFSET),
				   map_size, PAGE_KERNEL_EXEC);

	/* Create fixed mapping for early FDT parsing */
	end_va = __fix_to_virt(FIX_FDT) + FIX_FDT_SIZE;
	for (va = __fix_to_virt(FIX_FDT); va < end_va; va += PAGE_SIZE)
		create_pte_mapping(fixmap_pte, va,
				   dtb_pa + (va - __fix_to_virt(FIX_FDT)),
				   PAGE_SIZE, PAGE_KERNEL);

	/* Save pointer to DTB for early FDT parsing */
	dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
	/* Save physical address for memblock reservation */
	dtb_early_pa = dtb_pa;
}

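/*
 * Runs once the MMU is on: build the final linear mapping of all memory
 * banks in swapper_pg_dir and switch satp away from the early page tables.
 */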
static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	struct memblock_region *reg;

	/* Set mmu_enabled flag */
	mmu_enabled = true;

	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks */
	for_each_memblock(memory, reg) {
		start = reg->base;
		end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size, PAGE_KERNEL_EXEC);
		}
	}

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();
}

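/* Finish MM init: final page tables, sparse memmap and zone sizes. */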
void __init paging_init(void)
{
	setup_vm_final();
	memblocks_present();
	sparse_init();
	setup_zero_page();
	zone_sizes_init();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
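/* The virtual memmap is backed with base pages only; no huge-page vmemmap. */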
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}
#endif