// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be
 * used directly on kernel symbols (kasan_early_shadow_p*d). All the early
 * functions are called too early to use lm_alias, so the __p*d_populate
 * functions must be used instead, with the physical address taken from
 * __pa_symbol.
 */

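/*
 * Allocate a zeroed shadow page from memblock on the given node, preferring
 * memory above MAX_DMA_ADDRESS, and return its physical address.
 */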
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					 __pa(MAX_DMA_ADDRESS),
					 MEMBLOCK_ALLOC_KASAN, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

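/*
 * Like kasan_alloc_zeroed_page(), but skips the zeroing; the caller fills
 * the page with KASAN_SHADOW_INIT instead.
 */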
static phys_addr_t __init kasan_alloc_raw_page(int node)
{
	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
					     __pa(MAX_DMA_ADDRESS),
					     MEMBLOCK_ALLOC_KASAN, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

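/*
 * The kasan_*_offset() helpers below walk one level of the shadow page
 * tables for 'addr', populating a missing entry first: with the statically
 * allocated early shadow table when 'early' is set, or with a freshly
 * allocated page otherwise.
 */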
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
				      bool early)
{
	if (p4d_none(READ_ONCE(*p4dp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__p4d_populate(p4dp, pud_phys, PMD_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

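/*
 * The kasan_*_populate() helpers below build shadow page tables for
 * [addr, end), one level per function. At the PTE level each entry points
 * either at the shared early shadow page or at a freshly allocated page
 * initialized with KASAN_SHADOW_INIT.
 */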
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_raw_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = p4d_offset(pgdp, addr);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end);
}

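/*
 * Populate the shadow page tables for [addr, end) in the kernel page
 * tables; the entry point for both the early (boot-time) and the final
 * shadow setup.
 */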
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}

static void __init clear_pgds(unsigned long start,
			      unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used
	 * here because it's a nop on 2- and 3-level pagetable setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

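/*
 * Replace the early shadow with real shadow memory:
 *  - switch TTBR1 to tmp_pg_dir so the early shadow stays mapped,
 *  - unmap the early shadow from swapper_pg_dir,
 *  - map real shadow for the kernel image and all memblock memory,
 *  - back the remaining gaps with the shared early shadow page,
 *    remapped read-only,
 *  - switch back to swapper_pg_dir and enable reporting.
 */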
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.
	 * First we should unmap the early shadow (the clear_pgds() call
	 * below). However, instrumented code can't execute without shadow
	 * memory, so tmp_pg_dir is used to keep the early shadow mapped
	 * until the full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

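	/*
	 * Back the shadow for the VA ranges around the module area and the
	 * kernel image with the shared early shadow page.
	 */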
	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
				    (void *)mod_shadow_start);
	kasan_populate_early_shadow((void *)kimg_shadow_end,
				    (void *)KASAN_SHADOW_END);

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_early_shadow((void *)mod_shadow_end,
					    (void *)kimg_shadow_start);

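	/*
	 * Map real shadow for all memory; shadow pages are allocated on the
	 * node that owns the memory they describe.
	 */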
	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KASAN may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}