// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;

u64 __section(".mmuoff.data.write") vabits_actual;
EXPORT_SYMBOL(vabits_actual);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static DEFINE_SPINLOCK(swapper_pgdir_lock);

void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

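/*
 * Boot-time page-table allocator: take a page straight from memblock and
 * zero it through the FIX_PTE fixmap slot, for use while the kernel page
 * tables are still being constructed and the buddy allocator is not yet
 * up. The @shift argument (the level the page will serve) is accepted for
 * symmetry with the runtime allocators below but is not needed here.
 */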
static phys_addr_t __init early_pgtable_alloc(int shift)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker.
	 */
	pte_clear_fixmap();

	return phys;
}

static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	return ((old ^ new) & ~mask) == 0;
}

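/*
 * The init_*() and alloc_init_*() helpers below never assume that the
 * table they are writing is reachable through the linear map: each level
 * is temporarily mapped through its fixmap slot (p*d_set_fixmap_offset())
 * and unmapped again once it has been populated.
 */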
static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *ptep;

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		pte_t old_pte = READ_ONCE(*ptep);

		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      READ_ONCE(pte_val(*ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

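/*
 * Populate the PTE level for [addr, end), splitting the range at CONT_PTE
 * boundaries. When a naturally aligned block of CONT_PTEs lines up with an
 * equally aligned physical range, the entries get the PTE_CONT hint so the
 * TLB can cache the whole block as one contiguous entry.
 */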
static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc(PAGE_SHIFT);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
		pmd = READ_ONCE(*pmdp);
	}
	BUG_ON(pmd_bad(pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmdp, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc(PMD_SHIFT);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
		pud = READ_ONCE(*pudp);
	}
	BUG_ON(pud_bad(pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

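/*
 * 1GiB blocks at the PUD level are only possible with the 4K granule
 * (PAGE_SHIFT == 12), and only when the virtual range and the physical
 * address are all aligned to PUD_SIZE.
 */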
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	pud_t *pudp;
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	p4d_t p4d = READ_ONCE(*p4dp);

	if (p4d_none(p4d)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc(PUD_SHIFT);
		__p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
		p4d = READ_ONCE(*p4dp);
	}
	BUG_ON(p4d_bad(p4d));

	pudp = pud_set_fixmap_offset(p4dp, addr);
	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
}

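/*
 * Top-level mapping routine: install a mapping of [virt, virt + size) to
 * @phys in @pgdir, walking down through the alloc_init_*() levels above.
 * @flags (NO_BLOCK_MAPPINGS / NO_CONT_MAPPINGS) restrict the entry types
 * that may be used; @pgtable_alloc supplies new table pages and may be
 * NULL when the caller guarantees that all tables already exist.
 */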
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(int),
				 int flags)
{
	unsigned long addr, end, next;
	pgd_t *pgdp = pgd_offset_pgd(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	end = PAGE_ALIGN(virt + size);

	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

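/*
 * Runtime page-table allocators, used once the buddy allocator is up.
 * GFP_PGTABLE_KERNEL includes __GFP_ZERO, so the page comes back zeroed;
 * the dsb(ishst) publishes the zeroed table before it is linked into the
 * page tables.
 */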
static phys_addr_t __pgd_pgtable_alloc(int shift)
{
	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
	phys_addr_t pa = __pgd_pgtable_alloc(shift);

	/*
	 * Call proper page table ctor in case later we need to
	 * call core mm functions like apply_to_page_range() on
	 * this pre-allocated page table.
	 *
	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
	 * folded, and if so pgtable_pmd_page_ctor() becomes nop.
	 */
	if (shift == PAGE_SHIFT)
		BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
	else if (shift == PMD_SHIFT)
		BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));

	return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
			    (unsigned long)__init_begin - (unsigned long)_text,
			    PAGE_KERNEL_RO);
}

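/*
 * Create the linear mapping for every memblock memory region. The kernel
 * image and the crashkernel reservation are temporarily marked NOMAP so
 * that they can be mapped separately, with stricter attributes, below.
 */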
static void __init map_mem(pgd_t *pgdp)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	struct memblock_region *reg;
	int flags = 0;

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
#ifdef CONFIG_KEXEC_CORE
	if (crashk_res.end)
		memblock_mark_nomap(crashk_res.start,
				    resource_size(&crashk_res));
#endif

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
	}

	/*
	 * Map the linear alias of the [_text, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);

#ifdef CONFIG_KEXEC_CORE
	/*
	 * Use page-level mappings here so that we can shrink the region
	 * in page granularity and put back unused memory to buddy system
	 * through /sys/kernel/kexec_crash_size interface.
	 */
	if (crashk_res.end) {
		__map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
			       PAGE_KERNEL,
			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
		memblock_clear_nomap(crashk_res.start,
				     resource_size(&crashk_res));
	}
#endif
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr	= va_start;
	vma->phys_addr	= pa_start;
	vma->size	= size;
	vma->flags	= VM_MAP | vm_flags;
	vma->caller	= __builtin_return_address(0);

	vm_area_add_early(vma);
}

static int __init parse_rodata(char *arg)
{
	int ret = strtobool(arg, &rodata_enabled);
	if (!ret) {
		rodata_full = false;
		return 0;
	}

	/* permit 'full' in addition to boolean options */
	if (strcmp(arg, "full"))
		return -EINVAL;

	rodata_enabled = true;
	rodata_full = true;
	return 0;
}
early_param("rodata", parse_rodata);

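/*
 * With CONFIG_UNMAP_KERNEL_AT_EL0 (KPTI), the exception entry trampoline
 * must stay mapped while user space is running. Its text is mapped at
 * TRAMP_VALIAS in the dedicated tramp_pg_dir, and also via the fixmap in
 * the kernel page tables.
 */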
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
			     prot, __pgd_pgtable_alloc, 0);

	/* Map both the text and data into the kernel page table */
	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern char __entry_tramp_data_start[];

		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
			     __pa_symbol(__entry_tramp_data_start),
			     PAGE_KERNEL_RO);
	}

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Open-coded check for BTI, only used to determine the configuration of
 * early mappings before the cpufeature code has run.
 */
static bool arm64_early_this_cpu_has_bti(void)
{
	u64 pfr1;

	if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		return false;

	pfr1 = read_sysreg_s(SYS_ID_AA64PFR1_EL1);
	return cpuid_feature_extract_unsigned_field(pfr1,
						    ID_AA64PFR1_BT_SHIFT);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

	/*
	 * If we have a CPU that supports BTI and a kernel built for
	 * BTI then mark the kernel executable text as guarded pages
	 * now so we don't have to rewrite the page tables later.
	 */
	if (arm64_early_this_cpu_has_bti())
		text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);

	/*
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START),
			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		pgd_t *bm_pgdp;
		p4d_t *bm_p4dp;
		pud_t *bm_pudp;
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START);
		bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START);
		bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START);
		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgdp);
}

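/*
 * Set up the final page tables: map the kernel image and all of memory
 * into swapper_pg_dir (accessed through the fixmap while the switch is in
 * progress), move TTBR1 over to it, and release the temporary init_pg_dir
 * back to the memblock allocator.
 */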
void __init paging_init(void)
{
	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));

	map_kernel(pgdp);
	map_mem(pgdp);

	pgd_clear_fixmap();

	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
	init_mm.pgd = swapper_pg_dir;

	memblock_free(__pa_symbol(init_pg_dir),
		      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

	memblock_allow_resize();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;

	addr = arch_kasan_reset_tag(addr);
	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return 0;

	p4dp = p4d_offset(pgdp, addr);
	if (p4d_none(READ_ONCE(*p4dp)))
		return 0;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return 0;

	if (pud_sect(pud))
		return pfn_valid(pud_pfn(pud));

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return 0;

	if (pmd_sect(pmd))
		return pfn_valid(pmd_pfn(pmd));

	ptep = pte_offset_kernel(pmdp, addr);
	pte = READ_ONCE(*ptep);
	if (pte_none(pte))
		return 0;

	return pfn_valid(pte_pfn(pte));
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_hotplug_page_range(struct page *page, size_t size,
				    struct vmem_altmap *altmap)
{
	if (altmap) {
		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
	} else {
		WARN_ON(PageReserved(page));
		free_pages((unsigned long)page_address(page), get_order(size));
	}
}

static void free_hotplug_pgtable_page(struct page *page)
{
	free_hotplug_page_range(page, PAGE_SIZE, NULL);
}

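/*
 * Return true only if [start, end) covers the full extent of the table at
 * this level, clamped by @floor and @ceiling (the bounds of the enclosing
 * region, e.g. the vmemmap). A table page whose span sticks out past the
 * bounds may still be shared with a neighbouring mapping and must not be
 * freed.
 */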
static bool pgtable_range_aligned(unsigned long start, unsigned long end,
				  unsigned long floor, unsigned long ceiling,
				  unsigned long mask)
{
	start &= mask;
	if (start < floor)
		return false;

	if (ceiling) {
		ceiling &= mask;
		if (!ceiling)
			return false;
	}

	if (end - 1 > ceiling - 1)
		return false;
	return true;
}

static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	pte_t *ptep, pte;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);
		if (pte_none(pte))
			continue;

		WARN_ON(!pte_present(pte));
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		if (free_mapped)
			free_hotplug_page_range(pte_page(pte),
						PAGE_SIZE, altmap);
	} while (addr += PAGE_SIZE, addr < end);
}

static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd));
		if (pmd_sect(pmd)) {
			pmd_clear(pmdp);

			/*
			 * One TLBI should be sufficient here as the PMD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pmd_page(pmd),
							PMD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pmd_table(pmd));
		unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	pud_t *pudp, pud;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud));
		if (pud_sect(pud)) {
			pud_clear(pudp);

			/*
			 * One TLBI should be sufficient here as the PUD_SIZE
			 * range is mapped with a single block entry.
			 */
			flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
			if (free_mapped)
				free_hotplug_page_range(pud_page(pud),
							PUD_SIZE, altmap);
			continue;
		}
		WARN_ON(!pud_table(pud));
		unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr,
				    unsigned long end, bool free_mapped,
				    struct vmem_altmap *altmap)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

static void unmap_hotplug_range(unsigned long addr, unsigned long end,
				bool free_mapped, struct vmem_altmap *altmap)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	/*
	 * altmap can only be used as vmemmap mapping backing memory.
	 * In case the backing memory itself is not being freed, then
	 * altmap is irrelevant. Warn about this inconsistency when
	 * encountered.
	 */
	WARN_ON(!free_mapped && altmap);

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
	} while (addr = next, addr < end);
}

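/*
 * Table teardown happens in two phases: unmap_hotplug_range() above clears
 * the leaf entries (optionally freeing the backing pages), then the
 * free_empty_*_table() walkers below free any table pages that are left
 * completely empty, using pgtable_range_aligned() against floor/ceiling to
 * avoid freeing tables still shared with an adjacent region.
 */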
static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pte_t *ptep, pte;
	unsigned long i, start = addr;

	do {
		ptep = pte_offset_kernel(pmdp, addr);
		pte = READ_ONCE(*ptep);

		/*
		 * This is just a sanity check here which verifies that
		 * pte clearing has been done by earlier unmap loops.
		 */
		WARN_ON(!pte_none(pte));
	} while (addr += PAGE_SIZE, addr < end);

	if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK))
		return;

	/*
	 * Check whether we can free the pte page if the rest of the
	 * entries are empty. Overlaps with other regions have been
	 * handled by the floor/ceiling check.
	 */
	ptep = pte_offset_kernel(pmdp, 0UL);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		if (!pte_none(READ_ONCE(ptep[i])))
			return;
	}

	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(ptep));
}

static void free_empty_pmd_table(pud_t *pudp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pmd_t *pmdp, pmd;
	unsigned long i, next, start = addr;

	do {
		next = pmd_addr_end(addr, end);
		pmdp = pmd_offset(pudp, addr);
		pmd = READ_ONCE(*pmdp);
		if (pmd_none(pmd))
			continue;

		WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd));
		free_empty_pte_table(pmdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 2)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK))
		return;

	/*
	 * Check whether we can free the pmd page if the rest of the
	 * entries are empty. Overlaps with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pmdp = pmd_offset(pudp, 0UL);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(READ_ONCE(pmdp[i])))
			return;
	}

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pmdp));
}

static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	pud_t *pudp, pud;
	unsigned long i, next, start = addr;

	do {
		next = pud_addr_end(addr, end);
		pudp = pud_offset(p4dp, addr);
		pud = READ_ONCE(*pudp);
		if (pud_none(pud))
			continue;

		WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud));
		free_empty_pmd_table(pudp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);

	if (CONFIG_PGTABLE_LEVELS <= 3)
		return;

	if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK))
		return;

	/*
	 * Check whether we can free the pud page if the rest of the
	 * entries are empty. Overlaps with other regions have been
	 * handled by the floor/ceiling check.
	 */
	pudp = pud_offset(p4dp, 0UL);
	for (i = 0; i < PTRS_PER_PUD; i++) {
		if (!pud_none(READ_ONCE(pudp[i])))
			return;
	}

	p4d_clear(p4dp);
	__flush_tlb_kernel_pgtable(start);
	free_hotplug_pgtable_page(virt_to_page(pudp));
}

static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr,
				 unsigned long end, unsigned long floor,
				 unsigned long ceiling)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	do {
		next = p4d_addr_end(addr, end);
		p4dp = p4d_offset(pgdp, addr);
		p4d = READ_ONCE(*p4dp);
		if (p4d_none(p4d))
			continue;

		WARN_ON(!p4d_present(p4d));
		free_empty_pud_table(p4dp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}

static void free_empty_tables(unsigned long addr, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	unsigned long next;
	pgd_t *pgdp, pgd;

	do {
		next = pgd_addr_end(addr, end);
		pgdp = pgd_offset_k(addr);
		pgd = READ_ONCE(*pgdp);
		if (pgd_none(pgd))
			continue;

		WARN_ON(!pgd_present(pgd));
		free_empty_p4d_table(pgdp, addr, next, floor, ceiling);
	} while (addr = next, addr < end);
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
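/*
 * Populate the vmemmap (the struct page array). When the swapper uses
 * section maps, the vmemmap is backed with PMD-sized blocks allocated via
 * vmemmap_alloc_block_buf() (optionally from a vmem_altmap, i.e. device
 * memory); otherwise it falls back to base pages.
 */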
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, altmap);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	do {
		next = pmd_addr_end(addr, end);

		pgdp = vmemmap_pgd_populate(addr, node);
		if (!pgdp)
			return -ENOMEM;

		p4dp = vmemmap_p4d_populate(pgdp, addr, node);
		if (!p4dp)
			return -ENOMEM;

		pudp = vmemmap_pud_populate(p4dp, addr, node);
		if (!pudp)
			return -ENOMEM;

		pmdp = pmd_offset(pudp, addr);
		if (pmd_none(READ_ONCE(*pmdp))) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
			if (!p)
				return -ENOMEM;

			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmdp, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
#ifdef CONFIG_MEMORY_HOTPLUG
	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));

	unmap_hotplug_range(start, end, true, altmap);
	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
#endif
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

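/*
 * Walkers for the fixmap's statically allocated tables (bm_pud, bm_pmd and
 * bm_pte above). The *_kimg accessors are used so that the tables can be
 * reached through the kernel image mapping, before the linear map exists.
 */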
static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgdp = pgd_offset_k(addr);
	p4d_t *p4dp = p4d_offset(pgdp, addr);
	p4d_t p4d = READ_ONCE(*p4dp);

	BUG_ON(p4d_none(p4d) || p4d_bad(p4d));

	return pud_offset_kimg(p4dp, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pudp = fixmap_pud(addr);
	pud_t pud = READ_ONCE(*pudp);

	BUG_ON(pud_none(pud) || pud_bad(pud));

	return pmd_offset_kimg(pudp, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

Laura Abbott2077be62017-01-10 13:35:49 -08001173/*
1174 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
1175 * directly on kernel symbols (bm_p*d). This function is called too early to use
1176 * lm_alias so __p*d_populate functions must be used to populate with the
1177 * physical address from __pa_symbol.
1178 */
Laura Abbottaf86e592014-11-21 21:50:42 +00001179void __init early_fixmap_init(void)
1180{
Mike Rapoporte9f63762020-06-04 16:46:23 -07001181 pgd_t *pgdp;
1182 p4d_t *p4dp, p4d;
Will Deacon20a004e2018-02-15 11:14:56 +00001183 pud_t *pudp;
1184 pmd_t *pmdp;
Laura Abbottaf86e592014-11-21 21:50:42 +00001185 unsigned long addr = FIXADDR_START;
1186
Will Deacon20a004e2018-02-15 11:14:56 +00001187 pgdp = pgd_offset_k(addr);
Mike Rapoporte9f63762020-06-04 16:46:23 -07001188 p4dp = p4d_offset(pgdp, addr);
1189 p4d = READ_ONCE(*p4dp);
Ard Biesheuvelf80fb3a2016-01-26 14:12:01 +01001190 if (CONFIG_PGTABLE_LEVELS > 3 &&
Mike Rapoporte9f63762020-06-04 16:46:23 -07001191 !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) {
Ard Biesheuvelf9040772016-02-16 13:52:40 +01001192 /*
1193 * We only end up here if the kernel mapping and the fixmap
1194 * share the top level pgd entry, which should only happen on
1195 * 16k/4 levels configurations.
1196 */
1197 BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
Mike Rapoporte9f63762020-06-04 16:46:23 -07001198 pudp = pud_offset_kimg(p4dp, addr);
Ard Biesheuvelf9040772016-02-16 13:52:40 +01001199 } else {
Mike Rapoporte9f63762020-06-04 16:46:23 -07001200 if (p4d_none(p4d))
1201 __p4d_populate(p4dp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
Will Deacon20a004e2018-02-15 11:14:56 +00001202 pudp = fixmap_pud(addr);
Ard Biesheuvelf9040772016-02-16 13:52:40 +01001203 }
Will Deacon20a004e2018-02-15 11:14:56 +00001204 if (pud_none(READ_ONCE(*pudp)))
1205 __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
1206 pmdp = fixmap_pmd(addr);
1207 __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
Laura Abbottaf86e592014-11-21 21:50:42 +00001208
1209 /*
1210 * The boot-ioremap range spans multiple pmds, for which
Ard Biesheuvel157962f2016-02-16 13:52:38 +01001211 * we are not prepared:
Laura Abbottaf86e592014-11-21 21:50:42 +00001212 */
1213 BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
1214 != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
1215
Will Deacon20a004e2018-02-15 11:14:56 +00001216 if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
1217 || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
Laura Abbottaf86e592014-11-21 21:50:42 +00001218 WARN_ON(1);
Will Deacon20a004e2018-02-15 11:14:56 +00001219 pr_warn("pmdp %p != %p, %p\n",
1220 pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
Laura Abbottaf86e592014-11-21 21:50:42 +00001221 fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
1222 pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
1223 fix_to_virt(FIX_BTMAP_BEGIN));
1224 pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
1225 fix_to_virt(FIX_BTMAP_END));
1226
1227 pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
1228 pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
1229 }
1230}
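
/*
 * A minimal sketch, not part of the original file: the constraint the
 * comment above early_fixmap_init() describes. The helper name below is
 * hypothetical and exists only for illustration. p4d_populate() would call
 * virt_to_phys() on bm_pud, which is a kernel-image symbol rather than a
 * linear-map address, so this early we must pair __p4d_populate() with
 * __pa_symbol() instead.
 */
static void __init __maybe_unused early_populate_sketch(p4d_t *p4dp)
{
	/* Wrong this early: p4d_populate(&init_mm, p4dp, bm_pud); */
	if (p4d_none(READ_ONCE(*p4dp)))
		__p4d_populate(p4dp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
}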

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}
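
/*
 * A minimal sketch, not part of the original file: the map/use/unmap
 * pattern built on __set_fixmap(). The helper name is hypothetical;
 * FIX_PTE and FIXMAP_PAGE_NORMAL are assumed to carry their usual
 * arm64/asm-generic fixmap definitions.
 */
static void __maybe_unused fixmap_usage_sketch(phys_addr_t phys)
{
	void *vaddr;

	__set_fixmap(FIX_PTE, phys, FIXMAP_PAGE_NORMAL);
	vaddr = (void *)fix_to_virt(FIX_PTE);

	/* ... access the page through vaddr ... */
	(void)vaddr;

	/* An empty pgprot clears the PTE and flushes the TLB for the slot. */
	__set_fixmap(FIX_PTE, phys, __pgprot(0));
}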

void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we rely on MIN_FDT_ALIGN being at
	 * least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double
	 * check here that this is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
				       dt_virt_base,
				       round_up(offset + *size, SWAPPER_BLOCK_SIZE),
				       prot);

	return dt_virt;
}
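
/*
 * A minimal sketch, not part of the original file: how an early boot caller
 * (in the style of the arm64 setup code) might consume fixmap_remap_fdt().
 * The helper name is hypothetical; fdt_check_header() is the usual libfdt
 * validation entry point.
 */
static void __init __maybe_unused fdt_map_sketch(phys_addr_t dt_phys)
{
	int size;
	void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);

	if (!dt_virt || fdt_check_header(dt_virt))
		panic("Invalid device tree blob at physical address %pa\n",
		      &dt_phys);

	/* The blob can now be parsed in place via libfdt. */
}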

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}

int __init arch_ioremap_pud_supported(void)
{
	/*
	 * Only 4k granule supports level 1 block mappings.
	 * SW table walks can't handle removal of intermediate entries.
	 */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

int __init arch_ioremap_pmd_supported(void)
{
	/* See arch_ioremap_pud_supported() */
	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
				   pud_val(new_pud)))
		return 0;

	VM_BUG_ON(phys & ~PUD_MASK);
	set_pud(pudp, new_pud);
	return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
				   pmd_val(new_pmd)))
		return 0;

	VM_BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmdp, new_pmd);
	return 1;
}
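
/*
 * A minimal sketch, not part of the original file: roughly how a generic
 * ioremap path would decide whether to install a PUD block mapping via the
 * hooks above. The helper name and the exact checks are assumptions made
 * for illustration; the real logic lives in the generic ioremap code.
 */
static bool __maybe_unused ioremap_try_pud_sketch(pud_t *pudp,
						  unsigned long addr,
						  unsigned long end,
						  phys_addr_t phys,
						  pgprot_t prot)
{
	if (!arch_ioremap_pud_supported())
		return false;
	if ((end - addr) < PUD_SIZE ||
	    !IS_ALIGNED(addr, PUD_SIZE) || !IS_ALIGNED(phys, PUD_SIZE))
		return false;
	return pud_set_huge(pudp, phys, prot);
}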

int pud_clear_huge(pud_t *pudp)
{
	if (!pud_sect(READ_ONCE(*pudp)))
		return 0;
	pud_clear(pudp);
	return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
	if (!pmd_sect(READ_ONCE(*pmdp)))
		return 0;
	pmd_clear(pmdp);
	return 1;
}
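
/*
 * A minimal sketch, not part of the original file: p*d_clear_huge() only
 * succeeds on a section (block) entry; a *table* entry at the same level
 * has to be torn down with p*d_free_*_page() below instead. The helper
 * name is hypothetical.
 */
static void __maybe_unused iounmap_pud_sketch(pud_t *pudp, unsigned long addr)
{
	if (pud_clear_huge(pudp))
		flush_tlb_kernel_range(addr, addr + PUD_SIZE);
	else
		pud_free_pmd_page(pudp, addr);
}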

int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
	pte_t *table;
	pmd_t pmd;

	pmd = READ_ONCE(*pmdp);

	if (!pmd_table(pmd)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pte_offset_kernel(pmdp, addr);
	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(addr);
	pte_free_kernel(NULL, table);
	return 1;
}

int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
	pmd_t *table;
	pmd_t *pmdp;
	pud_t pud;
	unsigned long next, end;

	pud = READ_ONCE(*pudp);

	if (!pud_table(pud)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pmd_offset(pudp, addr);
	pmdp = table;
	next = addr;
	end = addr + PUD_SIZE;
	do {
		pmd_free_pte_page(pmdp, next);
	} while (pmdp++, next += PMD_SIZE, next != end);

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(addr);
	pmd_free(NULL, table);
	return 1;
}
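
/*
 * A minimal sketch, not part of the original file: the teardown ordering
 * the two functions above rely on. The entry is cleared and the walk cache
 * is invalidated *before* the old table page is freed, so a concurrent
 * software or hardware walker can never dereference a freed table. The
 * helper name is hypothetical.
 */
static void __maybe_unused free_table_sketch(pmd_t *pmdp, unsigned long addr)
{
	pte_t *table = pte_offset_kernel(pmdp, addr);

	pmd_clear(pmdp);			/* 1. unhook the sub-table */
	__flush_tlb_kernel_pgtable(addr);	/* 2. invalidate cached walks */
	pte_free_kernel(NULL, table);		/* 3. only now free the page */
}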

int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;	/* Don't attempt a block mapping */
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
{
	unsigned long end = start + size;

	WARN_ON(pgdir != init_mm.pgd);
	WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END));

	unmap_hotplug_range(start, end, false, NULL);
	free_empty_tables(start, end, PAGE_OFFSET, PAGE_END);
}

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	int ret, flags = 0;

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
			     size, params->pgprot, __pgd_pgtable_alloc,
			     flags);

	memblock_clear_nomap(start, size);

	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
			  params);
	if (ret)
		__remove_pgd_mapping(swapper_pg_dir,
				     __phys_to_virt(start), size);
	return ret;
}
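
/*
 * A minimal sketch, not part of the original file: the calling convention
 * of arch_add_memory() above, as the core hotplug path would use it. The
 * helper name and the PAGE_KERNEL choice are assumptions for illustration.
 * Note the contract: the linear map is created before __add_pages(), and is
 * torn down again on failure, so a failed hotplug leaves no stale mappings.
 */
static int __maybe_unused hotplug_add_sketch(int nid, u64 start, u64 size)
{
	struct mhp_params params = { .pgprot = PAGE_KERNEL };

	/* start and size are expected to be memory-section aligned. */
	return arch_add_memory(nid, start, size, &params);
}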

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	__remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size);
}

/*
 * This memory hotplug notifier helps prevent boot memory from being
 * inadvertently removed: it blocks the pfn range offlining process in
 * __offline_pages(), and thereby prevents both offlining and removal of
 * boot memory, which is always online initially. If and when boot memory
 * can be removed in the future, this notifier should be dropped and
 * free_hotplug_page_range() should handle any reserved pages allocated
 * during boot.
 */
static int prevent_bootmem_remove_notifier(struct notifier_block *nb,
					   unsigned long action, void *data)
{
	struct mem_section *ms;
	struct memory_notify *arg = data;
	unsigned long end_pfn = arg->start_pfn + arg->nr_pages;
	unsigned long pfn = arg->start_pfn;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;

	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		ms = __pfn_to_section(pfn);
		if (early_section(ms))
			return NOTIFY_BAD;
	}
	return NOTIFY_OK;
}

static struct notifier_block prevent_bootmem_remove_nb = {
	.notifier_call = prevent_bootmem_remove_notifier,
};

static int __init prevent_bootmem_remove_init(void)
{
	return register_memory_notifier(&prevent_bootmem_remove_nb);
}
device_initcall(prevent_bootmem_remove_init);
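
/*
 * A minimal sketch, not part of the original file: what the notifier above
 * effectively checks. Returning NOTIFY_BAD for MEM_GOING_OFFLINE aborts the
 * offline operation, so any range containing an early (boot) section can
 * never be offlined and hence never removed. The helper name is
 * hypothetical.
 */
static bool __maybe_unused range_has_boot_memory_sketch(unsigned long start_pfn,
							unsigned long nr_pages)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages;
	     pfn += PAGES_PER_SECTION)
		if (early_section(__pfn_to_section(pfn)))
			return true;
	return false;
}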
#endif