// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/early_ioremap.h>

#include <mm/mmu_decl.h>

extern char etext[], _stext[], _sinittext[], _einittext[];

static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;

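/*
 * Populate the page tables covering the fixmap range from a static,
 * page-aligned buffer so that early_ioremap() works before the memory
 * allocators do: one PTE page is wired into the kernel page tables for
 * each PGDIR_SIZE chunk between ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE)
 * and FIXADDR_TOP, then the generic early_ioremap layer is initialised.
 */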
notrace void __init early_ioremap_init(void)
{
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_ptr_k(addr);

	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}

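/*
 * Boot-time allocator for page table pages: returns a naturally aligned
 * block from memblock, panicking on failure since boot cannot proceed
 * without page tables.
 */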
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}

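/*
 * Pre-slab counterpart of pte_alloc_kernel(): if *pmdp is still empty,
 * allocate a PTE fragment from memblock and hook it up, then return the
 * PTE corresponding to @va.
 */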
pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}

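/*
 * Establish a single-page kernel mapping: map physical address @pa at
 * virtual address @va with protection @prot.  Usable both before and
 * after the slab allocator is up, since the PTE allocator is chosen
 * accordingly.  Returns 0 on success or -ENOMEM if no PTE page could
 * be allocated.  A sketch of a typical call (addresses hypothetical):
 *
 *	if (map_kernel_page(va, pa, PAGE_KERNEL))
 *		panic("cannot map page");
 */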
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_ptr_k(va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/*
		 * The PTE should never be already set nor present in the
		 * hash table.
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}

/*
 * Map in a chunk of physical memory starting at the given offset from
 * the start of lowmem.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
#ifdef CONFIG_PPC_BOOK3S_32
		if (ktext)
			hash_preload(&init_mm, v);
#endif
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

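/*
 * Map all of lowmem into the kernel address space.  MMU-specific code
 * (e.g. BATs on book3s/32) gets first shot at each memblock region via
 * mmu_mapin_ram(); whatever it does not cover is mapped page by page.
 */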
void __init mapin_ram(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		phys_addr_t base = reg->base;
		phys_addr_t top = min(base + reg->size, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}

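/*
 * Rewrite the protection bits of a single lowmem page's PTE.  Pages
 * covered by a block mapping are left untouched (and reported as
 * success); no TLB flushing is done here, that is the caller's job.
 */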
static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	unsigned long address;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	if (v_block_mapped(address))
		return 0;
	kpte = virt_to_kpte(address);
	if (!kpte)
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);

	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;
	struct page *start = page;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr_noflush(page, prot);
		if (err)
			break;
	}
	wmb();
	local_irq_restore(flags);
	flush_tlb_kernel_range((unsigned long)page_address(start),
			       (unsigned long)page_address(page));
	return err;
}

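/*
 * Remove execute permission from the init text when init memory is
 * freed.  Block mappings are handled by the MMU-specific helper;
 * otherwise the PTEs are simply downgraded to PAGE_KERNEL.
 */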
void mark_initmem_nx(void)
{
	struct page *page = virt_to_page(_sinittext);
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	if (v_block_mapped((unsigned long)_sinittext))
		mmu_mark_initmem_nx();
	else
		change_page_attr(page, numpages, PAGE_KERNEL);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
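/*
 * Write-protect the kernel: text becomes read-only-executable and
 * .rodata (extended to __init_begin) read-only.  As above, block
 * mappings defer to the MMU-specific helper; ptdump_check_wx() then
 * reports any pages left both writable and executable.
 */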
void mark_rodata_ro(void)
{
	struct page *page;
	unsigned long numpages;

	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	page = virt_to_page(_stext);
	numpages = PFN_UP((unsigned long)_etext) -
		   PFN_DOWN((unsigned long)_stext);

	change_page_attr(page, numpages, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	page = virt_to_page(__start_rodata);
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)__start_rodata);

	change_page_attr(page, numpages, PAGE_KERNEL_RO);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
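/*
 * DEBUG_PAGEALLOC hook: map or unmap pages in the linear mapping so
 * that stray accesses to free pages fault.  Highmem pages have no
 * permanent linear mapping, so there is nothing to do for them here.
 */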
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */