// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (C) 2020 FORTH-ICS/CARV
 *  Nick Kossifidis <mick@ics.forth.gr>
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>
#include <linux/crash_dump.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>
#include <asm/numa.h>

#include "../kernel/head.h"

struct kernel_mapping kernel_map __ro_after_init;
EXPORT_SYMBOL(kernel_map);
#ifdef CONFIG_XIP_KERNEL
#define kernel_map	(*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
#endif

phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);

#ifdef CONFIG_XIP_KERNEL
extern char _xiprom[], _exiprom[];
#endif

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
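/*
 * Until the final page tables are in place, the flattened device tree is
 * reached through a temporary window of two consecutive mappings starting
 * at DTB_EARLY_BASE_VA (i.e. the second PGD slot), set up in setup_vm().
 */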
#define DTB_EARLY_BASE_VA      PGDIR_SIZE
void *_dtb_early_va __initdata;
uintptr_t _dtb_early_pa __initdata;

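/*
 * Page-table pages are allocated through these callbacks so the same
 * mapping helpers work across the three phases of boot:
 *  - "early":  MMU off, tables come from statically allocated arrays;
 *  - "fixmap": MMU on, but memblock-backed pages are not yet reachable
 *    through the linear mapping, so they are accessed via fixmap slots;
 *  - "late":   the buddy allocator is up and __va() works.
 */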
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};

static phys_addr_t dma32_phys_limit __initdata;

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

static void __init print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
#ifdef CONFIG_64BIT
	print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
		  (unsigned long)ADDRESS_SPACE_END);
#endif
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_MMU && CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

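	/*
	 * Force a bounce-buffer pool when RAM extends beyond what 32-bit
	 * DMA masks can reach (or when forced on the command line);
	 * otherwise make sure SWIOTLB is not forced on.
	 */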
#ifdef CONFIG_SWIOTLB
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > PFN_DOWN(dma32_phys_limit))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;
#endif
	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	print_vm_layout();
}

/*
 * The default maximal physical memory size is -PAGE_OFFSET for a 32-bit
 * kernel, whereas for a 64-bit kernel the end of the virtual address space
 * is occupied by the modules/BPF/kernel mappings, which reduces the size
 * available for the linear mapping.
 * The limit can be lowered further via the "mem=" command-line parameter.
 */
#ifdef CONFIG_64BIT
static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G;
#else
static phys_addr_t memory_limit = -PAGE_OFFSET;
#endif

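/*
 * Parse the "mem=" early parameter: e.g. "mem=512M" caps the usable RAM
 * at 512 MiB (the value is rounded down to a page boundary).
 */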
static int __init early_mem(char *p)
{
	u64 size;

	if (!p)
		return 1;

	size = memparse(p, &p) & PAGE_MASK;
	memory_limit = min_t(u64, size, memory_limit);

	pr_notice("Memory limited to %lldMB\n", (u64)memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

static void __init setup_bootmem(void)
{
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);
	phys_addr_t __maybe_unused max_mapped_addr;
	phys_addr_t phys_ram_end;

#ifdef CONFIG_XIP_KERNEL
	vmlinux_start = __pa_symbol(&_sdata);
#endif

	memblock_enforce_memory_limit(memory_limit);

	/*
	 * Reserve from the start of the kernel to the end of the kernel
	 */
#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
	/*
	 * Make sure we align the reservation on PMD_SIZE since we will
	 * map the kernel in the linear mapping as read-only: we do not want
	 * any allocation to happen between _end and the next pmd aligned page.
	 */
	vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
#endif
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	phys_ram_end = memblock_end_of_DRAM();
#ifndef CONFIG_64BIT
#ifndef CONFIG_XIP_KERNEL
	phys_ram_base = memblock_start_of_DRAM();
#endif
	/*
	 * The memblock allocator is not aware that the last 4K bytes of
	 * addressable memory cannot be mapped because of the IS_ERR_VALUE
	 * macro. Make sure that the last 4K bytes are not usable by
	 * memblock if the end of DRAM coincides with the maximum
	 * addressable memory. For a 64-bit kernel, this problem cannot
	 * happen here as the end of the virtual address space is occupied
	 * by the kernel mapping, so the check must instead be done as soon
	 * as the kernel mapping base address is determined.
	 */
	max_mapped_addr = __pa(~(ulong)0);
	if (max_mapped_addr == (phys_ram_end - 1))
		memblock_set_current_limit(max_mapped_addr - 4096);
#endif

	min_low_pfn = PFN_UP(phys_ram_base);
	max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);

	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
	set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);

	reserve_initrd_mem();
	/*
	 * If the DTB is built in, there is no need to reserve its memblock.
	 * Otherwise, do reserve it, but avoid using
	 * early_init_fdt_reserve_self() since __pa() does not work for DTB
	 * pointers that are fixmap addresses.
	 */
	if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
		memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	early_init_fdt_scan_reserved_mem();
	dma_contiguous_reserve(dma32_phys_limit);
	memblock_allow_resize();
}

#ifdef CONFIG_MMU
static struct pt_alloc_ops _pt_ops __initdata;

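/*
 * When the kernel executes in place from flash, writable globals live in a
 * RAM copy of .data/.bss. XIP_FIXUP() translates a symbol's flash (link)
 * address into the address of that RAM copy, so early code always reaches
 * the live state. The same trick is applied to the static page directories
 * declared below.
 */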
#ifdef CONFIG_XIP_KERNEL
#define pt_ops			(*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops))
#else
#define pt_ops			_pt_ops
#endif

unsigned long pfn_base __ro_after_init;
EXPORT_SYMBOL(pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_pg_dir      ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
#define fixmap_pte             ((pte_t *)XIP_FIXUP(fixmap_pte))
#define early_pg_dir           ((pgd_t *)XIP_FIXUP(early_pg_dir))
#endif /* CONFIG_XIP_KERNEL */

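/*
 * Fixmap entries occupy compile-time-constant virtual addresses; only the
 * physical page they point at changes. A non-zero protection installs the
 * mapping, a zero protection tears it down.
 */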
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *__init get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t __init alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)));

	return __pa(vaddr);
}

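/*
 * Install a single 4K leaf entry; an already-populated slot is left
 * untouched, so the first mapping of a given VA wins.
 */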
static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
static pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

#ifdef CONFIG_XIP_KERNEL
#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
#define fixmap_pmd     ((pmd_t *)XIP_FIXUP(fixmap_pmd))
#define early_pmd      ((pmd_t *)XIP_FIXUP(early_pmd))
#endif /* CONFIG_XIP_KERNEL */

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);

	return (uintptr_t)early_pmd;
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t __init alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

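/*
 * On configurations where the PMD level is folded (RV32 with sv32, i.e. a
 * two-level table), a PGD entry points straight at a PTE table, so the
 * "next"-level helpers collapse onto the PTE ones.
 */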
#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif

void __init create_pgd_mapping(pgd_t *pgdp,
			       uintptr_t va, phys_addr_t pa,
			       phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

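/*
 * Pick the largest leaf size usable for a region: 2M (PMD) mappings
 * require both the physical base and the size to be PMD-aligned;
 * anything else falls back to 4K pages.
 */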
static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

#ifdef CONFIG_XIP_KERNEL
/* called from head.S with MMU off */
asmlinkage void __init __copy_data(void)
{
	void *from = (void *)(&_sdata);
	void *end = (void *)(&_end);
	void *to = (void *)CONFIG_PHYS_RAM_BASE;
	size_t sz = (size_t)(end - from + 1);

	memcpy(to, from, sz);
}
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
static __init pgprot_t pgprot_from_va(uintptr_t va)
{
	if (is_va_kernel_text(va))
		return PAGE_KERNEL_READ_EXEC;

	/*
	 * In a 64-bit kernel, the kernel mapping is outside the linear
	 * mapping, so we must protect its linear-mapping alias from being
	 * executed and written. The rodata section is marked read-only in
	 * mark_rodata_ro().
	 */
	if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
		return PAGE_KERNEL_READ;

	return PAGE_KERNEL;
}

void mark_rodata_ro(void)
{
	set_kernel_memory(__start_rodata, _data, set_memory_ro);
	if (IS_ENABLED(CONFIG_64BIT))
		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
				  set_memory_ro);

	debug_checkwx();
}
#else
static __init pgprot_t pgprot_from_va(uintptr_t va)
{
	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
		return PAGE_KERNEL;

	return PAGE_KERNEL_EXEC;
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements must be honoured for setup_vm() to work
 * correctly:
 * 1) It must use PC-relative addressing to access kernel symbols.
 *    To achieve this we always build with GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm(),
 *    so compiler instrumentation is disabled when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

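/*
 * create_kernel_page_table() comes in two flavours: the XIP variant maps
 * the flash-resident text separately from the RAM-resident data, while the
 * regular variant maps the whole image in one pass. It is called once
 * against early_pg_dir with everything executable and, on 64-bit, again
 * from setup_vm_final() against swapper_pg_dir with per-region permissions.
 */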
#ifdef CONFIG_XIP_KERNEL
static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
					    __always_unused bool early)
{
	uintptr_t va, end_va;

	/* Map the flash resident part */
	end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
	for (va = kernel_map.virt_addr; va < end_va; va += map_size)
		create_pgd_mapping(pgdir, va,
				   kernel_map.xiprom + (va - kernel_map.virt_addr),
				   map_size, PAGE_KERNEL_EXEC);

	/* Map the data in RAM */
	end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
	for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += map_size)
		create_pgd_mapping(pgdir, va,
				   kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
				   map_size, PAGE_KERNEL);
}
#else
static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
					    bool early)
{
	uintptr_t va, end_va;

	end_va = kernel_map.virt_addr + kernel_map.size;
	for (va = kernel_map.virt_addr; va < end_va; va += map_size)
		create_pgd_mapping(pgdir, va,
				   kernel_map.phys_addr + (va - kernel_map.virt_addr),
				   map_size,
				   early ?
					PAGE_KERNEL_EXEC : pgprot_from_va(va));
}
#endif

asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t __maybe_unused pa;
	uintptr_t map_size;
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif

	kernel_map.virt_addr = KERNEL_LINK_ADDR;

#ifdef CONFIG_XIP_KERNEL
	kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
	kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);

	phys_ram_base = CONFIG_PHYS_RAM_BASE;
	kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
	kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);

	kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
#else
	kernel_map.phys_addr = (uintptr_t)(&_start);
	kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif

	kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr;
#ifdef CONFIG_64BIT
	kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
#endif

	pfn_base = PFN_DOWN(kernel_map.phys_addr);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	map_size = PMD_SIZE;

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((kernel_map.phys_addr % map_size) != 0);

#ifdef CONFIG_64BIT
	/*
	 * The last 4K bytes of the addressable memory can not be mapped because
	 * of IS_ERR_VALUE macro.
	 */
	BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
#endif

	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
#ifdef CONFIG_XIP_KERNEL
	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
			   kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
			   kernel_map.phys_addr, PMD_SIZE, PAGE_KERNEL_EXEC);
#endif
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
			   kernel_map.phys_addr, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup early PGD covering entire kernel which will allow
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	create_kernel_page_table(early_pg_dir, map_size, true);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup early PMD for DTB */
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
#ifndef CONFIG_BUILTIN_DTB
	/* Create two consecutive PMD mappings for FDT early scan */
	pa = dtb_pa & ~(PMD_SIZE - 1);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
			   pa, PMD_SIZE, PAGE_KERNEL);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
			   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
#ifdef CONFIG_64BIT
	/*
	 * __va can't be used since it would return a linear mapping address
	 * whereas dtb_early_va will be used before setup_vm_final installs
	 * the linear mapping.
	 */
	dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
#else
	dtb_early_va = __va(dtb_pa);
#endif /* CONFIG_64BIT */
#endif /* CONFIG_BUILTIN_DTB */
#else
#ifndef CONFIG_BUILTIN_DTB
	/* Create two consecutive PGD mappings for FDT early scan */
	pa = dtb_pa & ~(PGDIR_SIZE - 1);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   pa, PGDIR_SIZE, PAGE_KERNEL);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
			   pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
#ifdef CONFIG_64BIT
	dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
#else
	dtb_early_va = __va(dtb_pa);
#endif /* CONFIG_64BIT */
#endif /* CONFIG_BUILTIN_DTB */
#endif
	dtb_early_pa = dtb_pa;

	/*
	 * Boot-time fixmap can only handle PMD_SIZE mappings. Thus, the
	 * boot-ioremap range cannot span multiple PMDs.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early ioremap fixmap is already created as it lies within the
	 * first 2MB of the fixmap region. We always map PMD_SIZE, so both
	 * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
	 * Verify that and warn the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}

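/*
 * Build the final page tables in swapper_pg_dir: remap the fixmap, cover
 * every memblock range with the linear mapping, then switch SATP away
 * from early_pg_dir.
 */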
static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	u64 i;

	/*
	 * The MMU is enabled at this point, but the page-table setup is not
	 * complete yet, so the fixmap-based page-table allocation functions
	 * must be used here.
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks in the linear mapping */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);
		if (end >= __pa(PAGE_OFFSET) + memory_limit)
			end = __pa(PAGE_OFFSET) + memory_limit;

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);

			create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
					   pgprot_from_va(va));
		}
	}

#ifdef CONFIG_64BIT
	/* Map the kernel */
	create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
#endif

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();

	/* generic page allocation functions must be used to set up page tables */
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	dtb_early_va = (void *)dtb_pa;
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserve memory for the crash kernel
 *
 * This function reserves the memory area given in the "crashkernel="
 * kernel command-line parameter. The reserved memory is used by the
 * dump-capture kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base = 0;
	unsigned long long crash_size = 0;
	unsigned long search_start = memblock_start_of_DRAM();
	unsigned long search_end = memblock_end_of_DRAM();

	int ret = 0;

	/*
	 * Don't reserve a region for a crash kernel when already running
	 * as a crash (kdump) kernel: it doesn't make much sense and we
	 * have limited memory resources.
	 */
#ifdef CONFIG_CRASH_DUMP
	if (is_kdump_kernel()) {
		pr_info("crashkernel: ignoring reservation request\n");
		return;
	}
#endif

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/*
		 * Current riscv boot protocol requires 2MB alignment for
		 * RV64 and 4MB alignment for RV32 (hugepage size)
		 */
		crash_base = memblock_find_in_range(search_start, search_end,
						    crash_size, PMD_SIZE);

		if (crash_base == 0) {
			pr_warn("crashkernel: couldn't allocate %lldKB\n",
				crash_size >> 10);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("crashkernel: requested region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("crashkernel: requested region is reserved\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, PMD_SIZE)) {
			pr_warn("crashkernel: requested region is misaligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_DUMP
/*
 * We keep track of the ELF core header of the crashed kernel through a
 * reserved-memory region with the compatible string "linux,elfcorehdr".
 * Here we register a callback to populate elfcorehdr_addr/size when this
 * region is present. Note that the region will be marked as reserved once
 * we call early_init_fdt_scan_reserved_mem() later on.
 */
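/*
 * An illustrative node, as it might appear in a kdump kernel's device
 * tree (addresses and sizes are placeholders):
 *
 *	reserved-memory {
 *		elfcorehdr@880000000 {
 *			compatible = "linux,elfcorehdr";
 *			reg = <0x8 0x80000000 0x0 0x10000>;
 *		};
 *	};
 */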
static int __init elfcore_hdr_setup(struct reserved_mem *rmem)
{
	elfcorehdr_addr = rmem->base;
	elfcorehdr_size = rmem->size;
	return 0;
}

RESERVEDMEM_OF_DECLARE(elfcorehdr, "linux,elfcorehdr", elfcore_hdr_setup);
#endif

void __init paging_init(void)
{
	setup_bootmem();
	setup_vm_final();
}

void __init misc_mem_init(void)
{
	early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
	arch_numa_init();
	sparse_init();
	zone_sizes_init();
#ifdef CONFIG_KEXEC_CORE
	reserve_crashkernel();
#endif
	memblock_dump_all();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif