// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/set_memory.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/early_ioremap.h>

#include <mm/mmu_decl.h>

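/*
 * Statically allocated PTEs backing the fixmap region, for use before
 * the boot-time page table allocators are up; wired into the kernel
 * PMDs by early_ioremap_init() below.
 */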
static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;

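/*
 * Point every PMD entry covering the fixmap region (FIXADDR_START up to
 * FIXADDR_TOP) at the static PTE pages above, one PTE page per
 * PGDIR_SIZE chunk, then let the generic early_ioremap code take over.
 */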
notrace void __init early_ioremap_init(void)
{
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_off_k(addr);

	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}

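/*
 * Allocate a naturally aligned page table chunk from memblock. This
 * runs before the slab allocator is available, so a failure here is
 * fatal.
 */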
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}

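/*
 * Boot-time counterpart of pte_alloc_kernel(): populate an empty kernel
 * PMD entry with a PTE fragment allocated from memblock, then return
 * the PTE pointer for va.
 */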
pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}

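/*
 * Map virtual address va to physical address pa in the kernel page
 * tables with protection prot, allocating a PTE page if needed.
 * Returns 0 on success, -ENOMEM if no PTE page could be allocated.
 */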
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_off_k(va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/*
		 * The PTE should never already be set, nor already be
		 * present in the hash table.
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}

/*
 * Map in a chunk of physical memory starting at offset.
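 *
 * Pages holding core kernel text are mapped with the text protection
 * PAGE_KERNEL_TEXT; everything else gets plain PAGE_KERNEL.
 */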
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	bool ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = core_kernel_text(v);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

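/*
 * Map all of lowmem into the kernel linear mapping. Each memblock range
 * is first offered to mmu_mapin_ram(), which may cover the start of it
 * with block mappings (e.g. BATs) and returns how far it got; whatever
 * remains is mapped page by page.
 */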
void __init mapin_ram(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end) {
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}

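/*
 * Strip execute permission from the init text (_sinittext.._einittext)
 * once it is no longer needed. Block-mapped regions are handled by the
 * MMU-specific helper; otherwise the PTEs are reset to PAGE_KERNEL.
 */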
void mark_initmem_nx(void)
{
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	if (v_block_mapped((unsigned long)_sinittext))
		mmu_mark_initmem_nx();
	else
		set_memory_attr((unsigned long)_sinittext, numpages, PAGE_KERNEL);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
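/*
 * Make kernel text read-only and executable and mark .rodata read-only,
 * then check that no writable+executable mappings remain. If the kernel
 * is block mapped, this is delegated to the MMU-specific helper.
 */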
void mark_rodata_ro(void)
{
	unsigned long numpages;

	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	numpages = PFN_UP((unsigned long)_etext) -
		   PFN_DOWN((unsigned long)_stext);

	set_memory_attr((unsigned long)_stext, numpages, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)__start_rodata);

	set_memory_attr((unsigned long)__start_rodata, numpages, PAGE_KERNEL_RO);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif

#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC)
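/*
 * DEBUG_PAGEALLOC hook: map or unmap lowmem pages so that stray
 * accesses to freed pages fault immediately. Highmem pages are left
 * alone here; they are mapped on demand via kmap anyway.
 */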
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long)page_address(page);

	if (PageHighMem(page))
		return;

	if (enable)
		set_memory_attr(addr, numpages, PAGE_KERNEL);
	else
		set_memory_attr(addr, numpages, __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */