// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)

u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;

u64 __section(".mmuoff.data.write") vabits_actual;
EXPORT_SYMBOL(vabits_actual);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static DEFINE_SPINLOCK(swapper_pgdir_lock);

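/*
 * Editor's note: swapper_pg_dir itself is expected to be mapped read-only
 * once paging_init() has run, so updates to its pgd entries are funnelled
 * through here, writing via a temporary fixmap alias of the pgd page while
 * holding swapper_pgdir_lock.
 */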
void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(int shift)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the
	 * page table walker.
	 */
	pte_clear_fixmap();

	return phys;
}

static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	/*
	 * Changing the memory type between Normal and Normal-Tagged is safe
	 * since Tagged is considered a permission attribute from the
	 * perspective of mismatched attribute aliases.
	 */
	if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) &&
	    ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) ||
	     (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)))
		mask |= PTE_ATTRINDX_MASK;

	return ((old ^ new) & ~mask) == 0;
}

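/*
 * Editor's illustration (not upstream code), assuming the usual arm64
 * PAGE_KERNEL / PAGE_KERNEL_RO / PROT_DEVICE_nGnRE definitions: dropping
 * write permission from a live kernel mapping only flips bits covered by
 * the permission mask above, so it is accepted, while switching the memory
 * type to Device (a PTE_ATTRINDX change outside the Normal/Normal-Tagged
 * pair) requires break-before-make and is rejected.
 */
static void __maybe_unused pgattr_change_example(void)
{
	pteval_t rw  = pgprot_val(PAGE_KERNEL);
	pteval_t ro  = pgprot_val(PAGE_KERNEL_RO);
	pteval_t dev = pgprot_val(__pgprot(PROT_DEVICE_nGnRE));

	WARN_ON(!pgattr_change_is_safe(rw, ro));	/* RW -> RO: safe */
	WARN_ON(pgattr_change_is_safe(rw, dev));	/* Normal -> Device: unsafe */
}
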
static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *ptep;

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		pte_t old_pte = READ_ONCE(*ptep);

		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      READ_ONCE(pte_val(*ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

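/*
 * Editor's note: on a 4K granule, CONT_PTE_MASK below covers a 64KiB window
 * (16 PTEs) and CONT_PMD_MASK a 32MiB window (16 PMDs), so a mapping request
 * whose VA, PA and length are all suitably aligned gets the contiguous hint
 * and can occupy a single TLB entry on CPUs that support it. Other granules
 * use different contiguous spans.
 */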
static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		phys_addr_t pte_phys;

		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc(PAGE_SHIFT);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
		pmd = READ_ONCE(*pmdp);
	}
	BUG_ON(pmd_bad(pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmdp, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		phys_addr_t pmd_phys;

		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc(PMD_SHIFT);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
		pud = READ_ONCE(*pudp);
	}
	BUG_ON(pud_bad(pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

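/*
 * Editor's note: a PUD-level block is only used with the 4K granule
 * (PAGE_SHIFT == 12), where a single PUD entry covers 1GiB; with 16K/64K
 * granules this level is folded or differently sized, so block mappings
 * are simply not attempted here.
 */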
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	pud_t *pudp;
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	p4d_t p4d = READ_ONCE(*p4dp);

	if (p4d_none(p4d)) {
		phys_addr_t pud_phys;

		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc(PUD_SHIFT);
		__p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
		p4d = READ_ONCE(*p4dp);
	}
	BUG_ON(p4d_bad(p4d));

	pudp = pud_set_fixmap_offset(p4dp, addr);
	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block.
		 */
		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
}

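/*
 * Editor's note: this is the common walker behind all kernel mapping
 * creation in this file. It descends pgd -> p4d -> pud -> pmd -> pte,
 * installing the largest block or contiguous mapping that the flags and
 * the alignment of VA/PA/size permit. Early boot callers pass
 * early_pgtable_alloc (memblock-backed); later callers pass
 * pgd_pgtable_alloc (buddy-allocator-backed); callers that must not
 * allocate pass NULL and may only modify existing tables.
 */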
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(int),
				 int flags)
{
	unsigned long addr, end, next;
	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	end = PAGE_ALIGN(virt + size);

	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

static phys_addr_t __pgd_pgtable_alloc(int shift)
{
	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);

	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
	phys_addr_t pa = __pgd_pgtable_alloc(shift);

	/*
	 * Call the proper page table ctor in case later we need to
	 * call core mm functions like apply_to_page_range() on
	 * this pre-allocated page table.
	 *
	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
	 * folded, and if so pgtable_pmd_page_ctor() becomes a nop.
	 */
	if (shift == PAGE_SHIFT)
		BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
	else if (shift == PMD_SHIFT)
		BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));

	return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

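/*
 * Editor's note: the early FDT fixmap code is the typical user of
 * create_mapping_noalloc() above; it runs before any page table pages can
 * be allocated and relies on the statically allocated table levels already
 * being in place.
 */
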
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata.
	 */
	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
			    (unsigned long)__init_begin - (unsigned long)_text,
			    PAGE_KERNEL_RO);
}

static bool crash_mem_map __initdata;

static int __init enable_crash_mem_map(char *arg)
{
	/*
	 * Proper parameter parsing is done by reserve_crashkernel(). We only
	 * need to know if the linear map has to avoid block mappings so that
	 * the crashkernel reservations can be unmapped later.
	 */
	crash_mem_map = true;

	return 0;
}
early_param("crashkernel", enable_crash_mem_map);

static void __init map_mem(pgd_t *pgdp)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	phys_addr_t start, end;
	int flags = 0;
	u64 i;

	if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop.
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);

	/* map all the memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		/*
		 * The linear map must allow allocation tags reading/writing
		 * if MTE is present. Otherwise, it has the same attributes as
		 * PAGE_KERNEL.
		 */
		__map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
			       flags);
	}

	/*
	 * Map the linear alias of the [_text, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * mark .rodata as read only. Use __init_begin rather than
	 * __end_rodata to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr = va_start;
	vma->phys_addr = pa_start;
	vma->size = size;
	vma->flags = VM_MAP | vm_flags;
	vma->caller = __builtin_return_address(0);

	vm_area_add_early(vma);
}

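/*
 * Editor's note: "rodata=" accepts the usual boolean spellings parsed by
 * strtobool() ("on"/"off", "1"/"0", "y"/"n") plus "full". "off" leaves
 * kernel text and rodata writable for external debuggers; "full"
 * additionally forces the linear map down to page granularity (see the
 * rodata_full check in map_mem()) so permissions can also be changed on
 * the linear alias at page size.
 */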
static int __init parse_rodata(char *arg)
{
	int ret = strtobool(arg, &rodata_enabled);

	if (!ret) {
		rodata_full = false;
		return 0;
	}

	/* permit 'full' in addition to boolean options */
	if (strcmp(arg, "full"))
		return -EINVAL;

	rodata_enabled = true;
	rodata_full = true;
	return 0;
}
early_param("rodata", parse_rodata);

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
			     prot, __pgd_pgtable_alloc, 0);

	/* Map both the text and data into the kernel page table */
	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern char __entry_tramp_data_start[];

		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
			     __pa_symbol(__entry_tramp_data_start),
			     PAGE_KERNEL_RO);
	}

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Open-coded check for BTI, only for use in determining the configuration
 * of early mappings, before the cpufeature code has run.
 */
static bool arm64_early_this_cpu_has_bti(void)
{
	u64 pfr1;

	if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		return false;

	pfr1 = read_sysreg_s(SYS_ID_AA64PFR1_EL1);
	return cpuid_feature_extract_unsigned_field(pfr1,
						    ID_AA64PFR1_BT_SHIFT);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

	/*
	 * If we have a CPU that supports BTI and a kernel built for
	 * BTI then mark the kernel executable text as guarded pages
	 * now so we don't have to rewrite the page tables later.
	 */
	if (arm64_early_this_cpu_has_bti())
		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);

	/*
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		pgd_t *bm_pgdp;
		p4d_t *bm_p4dp;
		pud_t *bm_pudp;

		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgdp);
}

void __init paging_init(void)
{
	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));

	map_kernel(pgdp);
	map_mem(pgdp);

	pgd_clear_fixmap();

	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
	init_mm.pgd = swapper_pg_dir;

	memblock_free(__pa_symbol(init_pg_dir),
		      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

	memblock_allow_resize();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;

	addr = arch_kasan_reset_tag(addr);
	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return 0;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return 0;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return 0;

	if (pud_sect(pud))
		return pfn_valid(pud_pfn(pud));

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return 0;

	if (pmd_sect(pmd))
		return pfn_valid(pmd_pfn(pmd));

	ptep = pte_offset_kernel(pmdp, addr);
	pte = READ_ONCE(*ptep);
	if (pte_none(pte))
		return 0;

	return pfn_valid(pte_pfn(pte));
}

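/*
 * Editor's note: kern_addr_valid() above walks the live swapper page tables
 * rather than consulting memblock, so it also answers correctly for block
 * (section) mappings; /proc/kcore is a typical caller, using it to decide
 * whether a kernel VA may safely be read.
 */
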
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_hotplug_page_range(struct page *page, size_t size,
				    struct vmem_altmap *altmap)
{
	if (altmap) {
		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
	} else {
		WARN_ON(PageReserved(page));
		free_pages((unsigned long)page_address(page), get_order(size));
	}
}

static void free_hotplug_pgtable_page(struct page *page)
{
	free_hotplug_page_range(page, PAGE_SIZE, NULL);
}

static bool pgtable_range_aligned(unsigned long start, unsigned long end,
				  unsigned long floor, unsigned long ceiling,
				  unsigned long mask)
{
	start &= mask;
	if (start < floor)
		return false;

	if (ceiling) {
		ceiling &= mask;
		if (!ceiling)
			return false;
	}

	if (end - 1 > ceiling - 1)
		return false;
	return true;
}

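/*
 * Editor's note: pgtable_range_aligned() follows the floor/ceiling
 * convention of free_pgd_range(), where ceiling == 0 means "no upper
 * limit" (the end - 1 > ceiling - 1 comparison makes ceiling == 0 wrap to
 * the highest address). It answers whether [start & mask, end) lies inside
 * [floor, ceiling], i.e. whether a page table page covering that naturally
 * aligned region could be freed without affecting mappings outside the
 * range being torn down.
 */
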
| 806 | static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr, |
Anshuman Khandual | eee0793 | 2020-08-06 23:23:29 -0700 | [diff] [blame] | 807 | unsigned long end, bool free_mapped, |
| 808 | struct vmem_altmap *altmap) |
Anshuman Khandual | bbd6ec6 | 2020-03-04 09:58:43 +0530 | [diff] [blame] | 809 | { |
| 810 | pte_t *ptep, pte; |
| 811 | |
| 812 | do { |
| 813 | ptep = pte_offset_kernel(pmdp, addr); |
| 814 | pte = READ_ONCE(*ptep); |
| 815 | if (pte_none(pte)) |
| 816 | continue; |
| 817 | |
| 818 | WARN_ON(!pte_present(pte)); |
| 819 | pte_clear(&init_mm, addr, ptep); |
| 820 | flush_tlb_kernel_range(addr, addr + PAGE_SIZE); |
| 821 | if (free_mapped) |
Anshuman Khandual | eee0793 | 2020-08-06 23:23:29 -0700 | [diff] [blame] | 822 | free_hotplug_page_range(pte_page(pte), |
| 823 | PAGE_SIZE, altmap); |
Anshuman Khandual | bbd6ec6 | 2020-03-04 09:58:43 +0530 | [diff] [blame] | 824 | } while (addr += PAGE_SIZE, addr < end); |
| 825 | } |

static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
                                    unsigned long end, bool free_mapped,
                                    struct vmem_altmap *altmap)
{
        unsigned long next;
        pmd_t *pmdp, pmd;

        do {
                next = pmd_addr_end(addr, end);
                pmdp = pmd_offset(pudp, addr);
                pmd = READ_ONCE(*pmdp);
                if (pmd_none(pmd))
                        continue;

                WARN_ON(!pmd_present(pmd));
                if (pmd_sect(pmd)) {
                        pmd_clear(pmdp);

                        /*
                         * One TLBI should be sufficient here as the PMD_SIZE
                         * range is mapped with a single block entry.
                         */
                        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
                        if (free_mapped)
                                free_hotplug_page_range(pmd_page(pmd),
                                                        PMD_SIZE, altmap);
                        continue;
                }
                WARN_ON(!pmd_table(pmd));
                unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
        } while (addr = next, addr < end);
}

static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
                                    unsigned long end, bool free_mapped,
                                    struct vmem_altmap *altmap)
{
        unsigned long next;
        pud_t *pudp, pud;

        do {
                next = pud_addr_end(addr, end);
                pudp = pud_offset(p4dp, addr);
                pud = READ_ONCE(*pudp);
                if (pud_none(pud))
                        continue;

                WARN_ON(!pud_present(pud));
                if (pud_sect(pud)) {
                        pud_clear(pudp);

                        /*
                         * One TLBI should be sufficient here as the PUD_SIZE
                         * range is mapped with a single block entry.
                         */
                        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
                        if (free_mapped)
                                free_hotplug_page_range(pud_page(pud),
                                                        PUD_SIZE, altmap);
                        continue;
                }
                WARN_ON(!pud_table(pud));
                unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
        } while (addr = next, addr < end);
}

static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
                                    unsigned long end, bool free_mapped,
                                    struct vmem_altmap *altmap)
{
        unsigned long next;
        p4d_t *p4dp, p4d;

        do {
                next = p4d_addr_end(addr, end);
                p4dp = p4d_offset(pgdp, addr);
                p4d = READ_ONCE(*p4dp);
                if (p4d_none(p4d))
                        continue;

                WARN_ON(!p4d_present(p4d));
                unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
        } while (addr = next, addr < end);
}

static void unmap_hotplug_range(unsigned long addr, unsigned long end,
                                bool free_mapped, struct vmem_altmap *altmap)
{
        unsigned long next;
        pgd_t *pgdp, pgd;

        /*
         * An altmap can only be used as backing memory for the vmemmap
         * mapping. If the backing memory itself is not being freed, the
         * altmap is irrelevant; warn when this inconsistency is
         * encountered.
         */
        WARN_ON(!free_mapped && altmap);

        do {
                next = pgd_addr_end(addr, end);
                pgdp = pgd_offset_k(addr);
                pgd = READ_ONCE(*pgdp);
                if (pgd_none(pgd))
                        continue;

                WARN_ON(!pgd_present(pgd));
                unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
        } while (addr = next, addr < end);
}
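
/*
 * Illustrative note (added, not in the original): the two callers below
 * exercise both modes of unmap_hotplug_range():
 *
 *      unmap_hotplug_range(start, end, true, altmap);  // vmemmap_free():
 *                                                      // free backing pages
 *      unmap_hotplug_range(start, end, false, NULL);   // linear map teardown:
 *                                                      // hot-removed memory
 *                                                      // itself is untouched
 */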

static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
                                 unsigned long end, unsigned long floor,
                                 unsigned long ceiling)
{
        pte_t *ptep, pte;
        unsigned long i, start = addr;

        do {
                ptep = pte_offset_kernel(pmdp, addr);
                pte = READ_ONCE(*ptep);

                /*
                 * This is just a sanity check to verify that the pte
                 * clearing has been done by the earlier unmap loops.
                 */
                WARN_ON(!pte_none(pte));
        } while (addr += PAGE_SIZE, addr < end);

        if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
                return;

        /*
         * Check whether we can free the pte page if the rest of the
         * entries are empty. Overlap with other regions has been
         * handled by the floor/ceiling check.
         */
        ptep = pte_offset_kernel(pmdp, 0UL);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                if (!pte_none(READ_ONCE(ptep[i])))
                        return;
        }

        pmd_clear(pmdp);
        __flush_tlb_kernel_pgtable(start);
        free_hotplug_pgtable_page(virt_to_page(ptep));
}

static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
                                 unsigned long end, unsigned long floor,
                                 unsigned long ceiling)
{
        pmd_t *pmdp, pmd;
        unsigned long i, next, start = addr;

        do {
                next = pmd_addr_end(addr, end);
                pmdp = pmd_offset(pudp, addr);
                pmd = READ_ONCE(*pmdp);
                if (pmd_none(pmd))
                        continue;

                WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
                free_empty_pte_table(pmdp, addr, next, floor, ceiling);
        } while (addr = next, addr < end);

        if (CONFIG_PGTABLE_LEVELS <= 2)
                return;

        if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
                return;

        /*
         * Check whether we can free the pmd page if the rest of the
         * entries are empty. Overlap with other regions has been
         * handled by the floor/ceiling check.
         */
        pmdp = pmd_offset(pudp, 0UL);
        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (!pmd_none(READ_ONCE(pmdp[i])))
                        return;
        }

        pud_clear(pudp);
        __flush_tlb_kernel_pgtable(start);
        free_hotplug_pgtable_page(virt_to_page(pmdp));
}

static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
                                 unsigned long end, unsigned long floor,
                                 unsigned long ceiling)
{
        pud_t *pudp, pud;
        unsigned long i, next, start = addr;

        do {
                next = pud_addr_end(addr, end);
                pudp = pud_offset(p4dp, addr);
                pud = READ_ONCE(*pudp);
                if (pud_none(pud))
                        continue;

                WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
                free_empty_pmd_table(pudp, addr, next, floor, ceiling);
        } while (addr = next, addr < end);

        if (CONFIG_PGTABLE_LEVELS <= 3)
                return;

        if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
                return;

        /*
         * Check whether we can free the pud page if the rest of the
         * entries are empty. Overlap with other regions has been
         * handled by the floor/ceiling check.
         */
        pudp = pud_offset(p4dp, 0UL);
        for (i = 0; i < PTRS_PER_PUD; i++) {
                if (!pud_none(READ_ONCE(pudp[i])))
                        return;
        }

        p4d_clear(p4dp);
        __flush_tlb_kernel_pgtable(start);
        free_hotplug_pgtable_page(virt_to_page(pudp));
}

static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
                                 unsigned long end, unsigned long floor,
                                 unsigned long ceiling)
{
        unsigned long next;
        p4d_t *p4dp, p4d;

        do {
                next = p4d_addr_end(addr, end);
                p4dp = p4d_offset(pgdp, addr);
                p4d = READ_ONCE(*p4dp);
                if (p4d_none(p4d))
                        continue;

                WARN_ON(!p4d_present(p4d));
                free_empty_pud_table(p4dp, addr, next, floor, ceiling);
        } while (addr = next, addr < end);
}

static void free_empty_tables(unsigned long addr, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        unsigned long next;
        pgd_t *pgdp, pgd;

        do {
                next = pgd_addr_end(addr, end);
                pgdp = pgd_offset_k(addr);
                pgd = READ_ONCE(*pgdp);
                if (pgd_none(pgd))
                        continue;

                WARN_ON(!pgd_present(pgd));
                free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
        } while (addr = next, addr < end);
}
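
/*
 * Illustrative note (added): hot-remove teardown is a strict two-pass
 * sequence, e.g. for the linear map:
 *
 *      unmap_hotplug_range(start, end, false, NULL);   // pass 1: clear leaf
 *                                                      // entries and TLBI
 *      free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
 *                                                      // pass 2: free empty
 *                                                      // table pages
 *
 * Pass 2 relies on pass 1 having cleared every leaf entry, which is what
 * the WARN_ON(!pte_none(pte)) sanity check above asserts.
 */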
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
{
        return vmemmap_populate_basepages(start, end, node, altmap);
}
#else   /* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
{
        unsigned long addr = start;
        unsigned long next;
        pgd_t *pgdp;
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;

        do {
                next = pmd_addr_end(addr, end);

                pgdp = vmemmap_pgd_populate(addr, node);
                if (!pgdp)
                        return -ENOMEM;

                p4dp = vmemmap_p4d_populate(pgdp, addr, node);
                if (!p4dp)
                        return -ENOMEM;

                pudp = vmemmap_pud_populate(p4dp, addr, node);
                if (!pudp)
                        return -ENOMEM;

                pmdp = pmd_offset(pudp, addr);
                if (pmd_none(READ_ONCE(*pmdp))) {
                        void *p;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
                        if (!p)
                                return -ENOMEM;

                        pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
                } else {
                        vmemmap_verify((pte_t *)pmdp, node, addr, next);
                }
        } while (addr = next, addr != end);

        return 0;
}
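
/*
 * Back-of-the-envelope sketch (added; the numbers assume 4K pages and a
 * 64-byte struct page): one PMD_SIZE (2MiB) vmemmap block holds
 * 2MiB / 64 = 32768 struct pages, i.e. it describes 32768 * 4KiB = 128MiB
 * of physical memory, which is why populating in PMD_SIZE chunks above is
 * worthwhile.
 */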
#endif  /* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end,
                  struct vmem_altmap *altmap)
{
#ifdef CONFIG_MEMORY_HOTPLUG
        WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

        unmap_hotplug_range(start, end, true, altmap);
        free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
#endif
}
#endif  /* CONFIG_SPARSEMEM_VMEMMAP */

static inline pud_t *fixmap_pud(unsigned long addr)
{
        pgd_t *pgdp = pgd_offset_k(addr);
        p4d_t *p4dp = p4d_offset(pgdp, addr);
        p4d_t p4d = READ_ONCE(*p4dp);

        BUG_ON(p4d_none(p4d) || p4d_bad(p4d));

        return pud_offset_kimg(p4dp, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
        pud_t *pudp = fixmap_pud(addr);
        pud_t pud = READ_ONCE(*pudp);

        BUG_ON(pud_none(pud) || pud_bad(pud));

        return pmd_offset_kimg(pudp, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly, so they can't
 * be used directly on kernel symbols (bm_p*d). This function is called
 * too early to use lm_alias, so the __p*d_populate functions must be used
 * to populate with the physical address taken from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
        pgd_t *pgdp;
        p4d_t *p4dp, p4d;
        pud_t *pudp;
        pmd_t *pmdp;
        unsigned long addr = FIXADDR_START;

        pgdp = pgd_offset_k(addr);
        p4dp = p4d_offset(pgdp, addr);
        p4d = READ_ONCE(*p4dp);
        if (CONFIG_PGTABLE_LEVELS > 3 &&
            !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
                /*
                 * We only end up here if the kernel mapping and the fixmap
                 * share the top level pgd entry, which should only happen on
                 * 16K pages / 4-level configurations.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
                pudp = pud_offset_kimg(p4dp, addr);
        } else {
                if (p4d_none(p4d))
                        __p4d_populate(p4dp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
                pudp = fixmap_pud(addr);
        }
        if (pud_none(READ_ONCE(*pudp)))
                __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
        pmdp = fixmap_pmd(addr);
        __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
             || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmdp %p != %p, %p\n",
                        pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
        }
}
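
/*
 * Illustrative note (added): early_fixmap_init() only wires up statically
 * allocated tables (bm_pud/bm_pmd/bm_pte), so it can run before any memory
 * allocator is available. Conceptually, for a 4-level configuration:
 *
 *      swapper_pg_dir[pgd_index(FIXADDR_START)]
 *              -> bm_pud -> bm_pmd -> bm_pte
 *
 * with each level populated via __p*d_populate() using __pa_symbol(), as
 * the comment above the function explains.
 */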

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
                  phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *ptep;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        ptep = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, ptep);
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        }
}
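
/*
 * Usage sketch (added; relies on the generic wrappers from
 * <asm-generic/fixmap.h>, which boil down to __set_fixmap()):
 *
 *      set_fixmap(FIX_EARLYCON_MEM_BASE, paddr);       // install mapping
 *      ... access via fix_to_virt(FIX_EARLYCON_MEM_BASE) ...
 *      clear_fixmap(FIX_EARLYCON_MEM_BASE);            // pgprot 0: pte_clear
 *                                                      // + TLB invalidation
 *
 * FIX_EARLYCON_MEM_BASE is just one example slot; any enum fixed_addresses
 * index other than FIX_HOLE works the same way.
 */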

void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
        int offset;
        void *dt_virt;

        /*
         * Check whether the physical FDT address is set and meets the minimum
         * alignment requirement. Since we rely on MIN_FDT_ALIGN being at
         * least 8 bytes, so that the magic and size fields of the FDT header
         * are always accessible after mapping the first chunk, double-check
         * that this is indeed the case.
         */
        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
                return NULL;

        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
         * to call create_mapping_noalloc() this early.
         *
         * On 64K pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
         * On 4K pages, we'll use section mappings for the FDT so we only
         * have to be in the same PUD.
         */
        BUILD_BUG_ON(dt_virt_base % SZ_2M);

        BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
                     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

        offset = dt_phys % SWAPPER_BLOCK_SIZE;
        dt_virt = (void *)dt_virt_base + offset;

        /* map the first chunk so we can read the size from the header */
        create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
                               dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

        if (fdt_magic(dt_virt) != FDT_MAGIC)
                return NULL;

        *size = fdt_totalsize(dt_virt);
        if (*size > MAX_FDT_SIZE)
                return NULL;

        if (offset + *size > SWAPPER_BLOCK_SIZE)
                create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
                                       dt_virt_base,
                                       round_up(offset + *size, SWAPPER_BLOCK_SIZE),
                                       prot);

        return dt_virt;
}
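
/*
 * Worked example (added; assumes 4K pages, where SWAPPER_BLOCK_SIZE is
 * 2MiB, and a made-up address): for dt_phys == 0x81270000 the first
 * create_mapping_noalloc() maps the 2MiB block at 0x81200000, and the FDT
 * header is then read at offset == 0x70000 into it. Only if
 * offset + fdt_totalsize() spills past that first block is the mapping
 * extended by the second create_mapping_noalloc() call.
 */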

int __init arch_ioremap_p4d_supported(void)
{
        return 0;
}

int __init arch_ioremap_pud_supported(void)
{
        /*
         * Only the 4K granule supports level 1 block mappings.
         * SW table walks can't handle removal of intermediate entries.
         */
        return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
               !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

int __init arch_ioremap_pmd_supported(void)
{
        /* See arch_ioremap_pud_supported() */
        return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}
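
/*
 * Illustrative note (added): these three hooks are queried once at boot by
 * the generic huge-ioremap setup (ioremap_huge_init() in lib/ioremap.c at
 * the time of writing), which also honours the "nohugeiomap" command-line
 * option. Returning 0 simply forces ioremap() to fall back to page-granular
 * mappings at that level.
 */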

int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
        pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));

        /* Only allow permission changes for now */
        if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
                                   pud_val(new_pud)))
                return 0;

        VM_BUG_ON(phys & ~PUD_MASK);
        set_pud(pudp, new_pud);
        return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
        pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));

        /* Only allow permission changes for now */
        if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
                                   pmd_val(new_pmd)))
                return 0;

        VM_BUG_ON(phys & ~PMD_MASK);
        set_pmd(pmdp, new_pmd);
        return 1;
}

int pud_clear_huge(pud_t *pudp)
{
        if (!pud_sect(READ_ONCE(*pudp)))
                return 0;
        pud_clear(pudp);
        return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
        if (!pmd_sect(READ_ONCE(*pmdp)))
                return 0;
        pmd_clear(pmdp);
        return 1;
}

int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
        pte_t *table;
        pmd_t pmd;

        pmd = READ_ONCE(*pmdp);

        if (!pmd_table(pmd)) {
                VM_WARN_ON(1);
                return 1;
        }

        table = pte_offset_kernel(pmdp, addr);
        pmd_clear(pmdp);
        __flush_tlb_kernel_pgtable(addr);
        pte_free_kernel(NULL, table);
        return 1;
}

int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
        pmd_t *table;
        pmd_t *pmdp;
        pud_t pud;
        unsigned long next, end;

        pud = READ_ONCE(*pudp);

        if (!pud_table(pud)) {
                VM_WARN_ON(1);
                return 1;
        }

        table = pmd_offset(pudp, addr);
        pmdp = table;
        next = addr;
        end = addr + PUD_SIZE;
        do {
                pmd_free_pte_page(pmdp, next);
        } while (pmdp++, next += PMD_SIZE, next != end);

        pud_clear(pudp);
        __flush_tlb_kernel_pgtable(addr);
        pmd_free(NULL, table);
        return 1;
}

int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
        return 0;       /* Don't attempt a block mapping */
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
{
        unsigned long end = start + size;

        WARN_ON(pgdir != init_mm.pgd);
        WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));

        unmap_hotplug_range(start, end, false, NULL);
        free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
}

static bool inside_linear_region(u64 start, u64 size)
{
        u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
        u64 end_linear_pa = __pa(PAGE_END - 1);

        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                /*
                 * Check for a wrap: with a randomized linear mapping, the
                 * start physical address can end up bigger than the end
                 * physical address. In this case set start to zero, because
                 * the range [0, end_linear_pa] must still be able to cover
                 * all addressable physical addresses.
                 */
                if (start_linear_pa > end_linear_pa)
                        start_linear_pa = 0;
        }

        WARN_ON(start_linear_pa > end_linear_pa);

        /*
         * The linear mapping region is the range
         * [PAGE_OFFSET..(PAGE_END - 1)], covering both of its end points
         * but excluding PAGE_END itself. The maximum physical range that
         * can be mapped inside this linear mapping range must likewise be
         * derived from its end points.
         */
        return start >= start_linear_pa && (start + size - 1) <= end_linear_pa;
}
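
/*
 * Worked example (added; assumes vabits_actual == 48 and no linear map
 * randomization): _PAGE_OFFSET(48) == 0xffff000000000000 and
 * PAGE_END == 0xffff800000000000, so a hot-add request [start, start + size)
 * is accepted only if it fits entirely inside the physical window
 * [__pa(_PAGE_OFFSET(48)), __pa(PAGE_END - 1)] that the linear map can
 * reach.
 */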

int arch_add_memory(int nid, u64 start, u64 size,
                    struct mhp_params *params)
{
        int ret, flags = 0;

        if (!inside_linear_region(start, size)) {
                pr_err("[%llx %llx] is outside linear mapping region\n",
                       start, start + size);
                return -EINVAL;
        }

        if (rodata_full || debug_pagealloc_enabled())
                flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

        __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
                             size, params->pgprot, __pgd_pgtable_alloc,
                             flags);

        memblock_clear_nomap(start, size);

        ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
                          params);
        if (ret) {
                __remove_pgd_mapping(swapper_pg_dir,
                                     __phys_to_virt(start), size);
        } else {
                max_pfn = PFN_UP(start + size);
                max_low_pfn = max_pfn;
        }

        return ret;
}
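
/*
 * Illustrative note (added): hot-add is linear-map-first:
 * __create_pgd_mapping() wires up the linear mapping before __add_pages()
 * creates the vmemmap and sections, and the mapping is torn down again if
 * __add_pages() fails. arch_remove_memory() below performs the same two
 * steps in the reverse order.
 */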

void arch_remove_memory(int nid, u64 start, u64 size,
                        struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        __remove_pages(start_pfn, nr_pages, altmap);
        __remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
}

/*
 * This memory hotplug notifier helps prevent boot memory from being
 * inadvertently removed, by blocking the pfn range offlining process in
 * __offline_pages(). Hence this prevents both offlining and the removal
 * process for boot memory, which is initially always online. In the
 * future, if and when boot memory can be removed, this notifier should be
 * dropped and free_hotplug_page_range() should handle any reserved pages
 * allocated during boot.
 */
static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
                                           unsigned long action, void *data)
{
        struct mem_section *ms;
        struct memory_notify *arg = data;
        unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
        unsigned long pfn = arg->start_pfn;

        if (action != MEM_GOING_OFFLINE)
                return NOTIFY_OK;

        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                ms = __pfn_to_section(pfn);
                if (early_section(ms))
                        return NOTIFY_BAD;
        }
        return NOTIFY_OK;
}

static struct notifier_block prevent_bootmem_remove_nb = {
        .notifier_call = prevent_bootmem_remove_notifier,
};

static int __init prevent_bootmem_remove_init(void)
{
        return register_memory_notifier(&prevent_bootmem_remove_nb);
}
device_initcall(prevent_bootmem_remove_init);
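
/*
 * Illustrative flow (added): when userspace writes "offline" to a memory
 * block's sysfs state file, the hotplug core emits MEM_GOING_OFFLINE via
 * memory_notify(); if any section in the range is an early (boot) section,
 * the notifier above returns NOTIFY_BAD, the offline attempt fails, and
 * the boot memory therefore never reaches the removal path.
 */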
#endif