// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation

#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>

/*
 * Kasan shadow region must lie at a fixed address across sv39, sv48 and sv57
 * which is right before the kernel.
 *
 * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
 * the page global directory with kasan_early_shadow_pmd.
 *
 * For sv48 and sv57, the region is not aligned on PGDIR_SIZE so the mapping
 * must be divided as follows:
 * - the first PGD entry, although incomplete, is populated with
 *   kasan_early_shadow_pud/p4d
 * - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
 * - the last PGD entry is shared with the kernel mapping so populated at the
 *   lower levels pud/p4d
 *
 * In addition, when shallow populating a kasan region (for example vmalloc),
 * this region may also not be aligned on PGDIR_SIZE, so we must go down to the
 * pud level too.
 */

extern pgd_t early_pg_dir[PTRS_PER_PGD];

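/*
 * Populate the PTE level for the shadow range [vaddr, end): reuse the pte
 * table hanging off the pmd entry (or allocate one from memblock if the pmd
 * is still empty), back every missing pte with a freshly allocated shadow
 * page, then install the pte table into the pmd entry.
 */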
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}

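/*
 * Populate the PMD level for [vaddr, end): reuse the pmd table referenced by
 * the pud entry (allocating a fresh one if it still points at the early
 * shadow), map PMD_SIZE-aligned chunks with huge pages when memblock can
 * provide them, and fall back to kasan_populate_pte() otherwise.
 */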
static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	} else {
		base_pmd = (pmd_t *)pud_pgtable(*pud);
		if (base_pmd == lm_alias(kasan_early_shadow_pmd))
			base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
	}

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);

		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole pmd table to be populated before installing it in
	 * the pud entry: if we set the pud entry before populating the pmd
	 * table entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}

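/*
 * Populate the PUD level for [vaddr, end). In the early case the pud table is
 * reached through pt_ops and PUD_SIZE-aligned chunks are simply pointed at
 * kasan_early_shadow_pmd; in the final case huge PUD mappings are attempted
 * before falling back to kasan_populate_pmd().
 */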
static void __init kasan_populate_pud(pgd_t *pgd,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	pud_t *pudp, *base_pud;
	unsigned long next;

	if (early) {
		/*
		 * We can't use pgd_page_vaddr here as it would return a linear
		 * mapping address whereas the linear mapping is not mapped yet:
		 * use the pt_ops facility instead, which returns the physical
		 * address when populating early_pg_dir and the kernel virtual
		 * address when populating swapper_pg_dir.
		 */
		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
	} else {
		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
		if (base_pud == lm_alias(kasan_early_shadow_pud))
			base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
	}

	pudp = base_pud + pud_index(vaddr);

	do {
		next = pud_addr_end(vaddr, end);

		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
			if (early) {
				phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else {
				phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
				if (phys_addr) {
					set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pmd(pudp, vaddr, next);
	} while (pudp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole pud table to be populated before installing it in
	 * the pgd entry: if we set the pgd entry before populating the pud
	 * table entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	if (!early)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}

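/*
 * Pick the level right below the PGD depending on the paging mode detected at
 * boot: the pud level when pgtable_l4_enabled (sv48), the pmd level for sv39
 * where the pud level is folded.
 */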
#define kasan_early_shadow_pgd_next			(pgtable_l4_enabled ?	\
				(uintptr_t)kasan_early_shadow_pud :		\
				(uintptr_t)kasan_early_shadow_pmd)
#define kasan_populate_pgd_next(pgdp, vaddr, next, early)			\
		(pgtable_l4_enabled ?						\
			kasan_populate_pud(pgdp, vaddr, next, early) :		\
			kasan_populate_pmd((pud_t *)pgdp, vaddr, next))

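/*
 * Populate the PGD level for [vaddr, end). PGDIR_SIZE-aligned chunks are
 * pointed at the early shadow tables in the early case, or replaced with a
 * huge memblock allocation when one is available; everything else is handed
 * down one level via kasan_populate_pgd_next().
 */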
static void __init kasan_populate_pgd(pgd_t *pgdp,
				      unsigned long vaddr, unsigned long end,
				      bool early)
{
	phys_addr_t phys_addr;
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			if (early) {
				phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
				continue;
			} else if (pgd_page_vaddr(*pgdp) ==
				   (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
				/*
				 * pgdp can't be none since kasan_early_init
				 * initialized the whole KASAN shadow region
				 * with kasan_early_shadow_pud: if the entry
				 * still points there, we can try to allocate
				 * a hugepage as a replacement.
				 */
				phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
				if (phys_addr) {
					set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
					continue;
				}
			}
		}

		kasan_populate_pgd_next(pgdp, vaddr, next, early);
	} while (pgdp++, vaddr = next, vaddr != end);
}

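/*
 * Early boot: point the whole shadow region at the single zeroed
 * kasan_early_shadow_page through the early shadow page tables, so that any
 * shadow access is valid before the real shadow memory is allocated.
 */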
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;

	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));

	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN
				(__pa((uintptr_t)kasan_early_shadow_pte)),
				PAGE_TABLE));

	if (pgtable_l4_enabled) {
		for (i = 0; i < PTRS_PER_PUD; ++i)
			set_pud(kasan_early_shadow_pud + i,
				pfn_pud(PFN_DOWN
					(__pa((uintptr_t)kasan_early_shadow_pmd)),
					PAGE_TABLE));
	}

	kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

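/*
 * Set up the early shadow mapping in swapper_pg_dir as well, so the shadow
 * region stays mapped when the kernel stops using early_pg_dir.
 */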
void __init kasan_swapper_init(void)
{
	kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
			   KASAN_SHADOW_START, KASAN_SHADOW_END, true);

	local_flush_tlb_all();
}

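/*
 * Allocate real shadow memory for [start, end): populate the page tables
 * with backing pages and clear the new shadow to KASAN_SHADOW_INIT.
 */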
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend, false);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}

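/*
 * Shallow populate the pud level for [vaddr, end): pud entries that still
 * point at the early shadow pmd get a freshly allocated (empty) pmd table,
 * without mapping any shadow pages, so the vmalloc shadow can be populated
 * later.
 */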
static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
					      unsigned long vaddr, unsigned long end,
					      bool kasan_populate)
{
	unsigned long next;
	pud_t *pudp, *base_pud;
	pmd_t *base_pmd;
	bool is_kasan_pmd;

	base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
	pudp = base_pud + pud_index(vaddr);

	if (kasan_populate)
		memcpy(base_pud, (void *)kasan_early_shadow_pgd_next,
		       sizeof(pud_t) * PTRS_PER_PUD);

	do {
		next = pud_addr_end(vaddr, end);
		is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));

		if (is_kasan_pmd) {
			base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
		}
	} while (pudp++, vaddr = next, vaddr != end);
}

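/*
 * Shallow populate the pgd level for [vaddr, end): pgd entries that still
 * point at the early shadow get a fresh page table, and partially covered
 * entries are handed down to kasan_shallow_populate_pud().
 */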
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
	unsigned long next;
	void *p;
	pgd_t *pgd_k = pgd_offset_k(vaddr);
	bool is_kasan_pgd_next;

	do {
		next = pgd_addr_end(vaddr, end);
		is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
				     (unsigned long)lm_alias(kasan_early_shadow_pgd_next));

		if (is_kasan_pgd_next) {
			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}

		if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
			continue;

		kasan_shallow_populate_pud(pgd_k, vaddr, next, is_kasan_pgd_next);
	} while (pgd_k++, vaddr = next, vaddr != end);
}

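/*
 * Shallow populate the shadow of [start, end): allocate intermediate page
 * tables only, used for the vmalloc region when CONFIG_KASAN_VMALLOC is
 * enabled.
 */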
static void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_shallow_populate_pgd(vaddr, vend);
	local_flush_tlb_all();
}

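/*
 * Final KASAN setup: shallow populate the vmalloc shadow, allocate real
 * shadow for the linear mapping and for the kernel/BPF/modules region, then
 * remap the early shadow page read-only and let init_task run with KASAN
 * checks enabled.
 */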
void __init kasan_init(void)
{
	phys_addr_t p_start, p_end;
	u64 i;

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate the linear mapping */
	for_each_mem_range(i, &p_start, &p_end) {
		void *start = (void *)__va(p_start);
		void *end = (void *)__va(p_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Populate kernel, BPF, modules mapping */
	kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
		       kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));

	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;
}