Thomas Gleixner | 2874c5f | 2019-05-27 08:55:01 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 2 | /* |
| 3 | * This file contains the routines setting up the linux page tables. |
| 4 | * -- paulus |
| 5 | * |
| 6 | * Derived from arch/ppc/mm/init.c: |
| 7 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
| 8 | * |
| 9 | * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) |
| 10 | * and Cort Dougan (PReP) (cort@cs.nmt.edu) |
| 11 | * Copyright (C) 1996 Paul Mackerras |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 12 | * |
| 13 | * Derived from "arch/i386/mm/init.c" |
| 14 | * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 15 | */ |
| 16 | |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 17 | #include <linux/kernel.h> |
| 18 | #include <linux/module.h> |
| 19 | #include <linux/types.h> |
| 20 | #include <linux/mm.h> |
| 21 | #include <linux/vmalloc.h> |
| 22 | #include <linux/init.h> |
| 23 | #include <linux/highmem.h> |
Yinghai Lu | 95f72d1 | 2010-07-12 14:36:09 +1000 | [diff] [blame] | 24 | #include <linux/memblock.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 25 | #include <linux/slab.h> |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 26 | |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 27 | #include <asm/pgalloc.h> |
Kumar Gala | 2c419bd | 2008-04-23 23:05:20 +1000 | [diff] [blame] | 28 | #include <asm/fixmap.h> |
David Howells | ae3a197 | 2012-03-28 18:30:02 +0100 | [diff] [blame] | 29 | #include <asm/setup.h> |
Christophe Leroy | 95902e6 | 2017-08-02 15:51:05 +0200 | [diff] [blame] | 30 | #include <asm/sections.h> |
Christophe Leroy | 925ac14 | 2020-05-19 05:48:58 +0000 | [diff] [blame] | 31 | #include <asm/early_ioremap.h> |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 32 | |
Christophe Leroy | 9d9f2cc | 2019-03-29 09:59:59 +0000 | [diff] [blame] | 33 | #include <mm/mmu_decl.h> |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 34 | |
Christophe Leroy | 060ef9d | 2016-02-10 08:17:08 +0100 | [diff] [blame] | 35 | extern char etext[], _stext[], _sinittext[], _einittext[]; |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 36 | |
/* Statically-allocated PTE storage backing the fixmap area, so fixmap
 * translations can be wired up before any memory allocator is available.
 */
static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;

/*
 * Hook the static fixmap PTE pages above into the kernel page tables,
 * covering the whole fixmap region, then let the generic early_ioremap
 * code finish its setup.  Runs very early at boot (__init, notrace).
 */
notrace void __init early_ioremap_init(void)
{
	/* Start at the PGDIR-aligned base of the fixmap region */
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_ptr_k(addr);

	/* NOTE(review): the signed (s32) comparison presumably keeps the
	 * bound correct when FIXADDR_TOP sits near the top of the address
	 * space and addr would wrap -- confirm before changing.
	 * Each iteration populates one PMD entry with one PTE page.
	 */
	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}
| 51 | |
Christophe Leroy | 4a6d8cf | 2019-04-26 15:58:06 +0000 | [diff] [blame] | 52 | static void __init *early_alloc_pgtable(unsigned long size) |
| 53 | { |
| 54 | void *ptr = memblock_alloc(size, size); |
| 55 | |
| 56 | if (!ptr) |
| 57 | panic("%s: Failed to allocate %lu bytes align=0x%lx\n", |
| 58 | __func__, size, size); |
| 59 | |
| 60 | return ptr; |
| 61 | } |
| 62 | |
Christophe Leroy | 34536d7 | 2020-05-19 05:49:22 +0000 | [diff] [blame] | 63 | pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va) |
Christophe Leroy | 4a6d8cf | 2019-04-26 15:58:06 +0000 | [diff] [blame] | 64 | { |
| 65 | if (pmd_none(*pmdp)) { |
| 66 | pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE); |
| 67 | |
| 68 | pmd_populate_kernel(&init_mm, pmdp, ptep); |
| 69 | } |
| 70 | return pte_offset_kernel(pmdp, va); |
| 71 | } |
| 72 | |
| 73 | |
| 74 | int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot) |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 75 | { |
| 76 | pmd_t *pd; |
| 77 | pte_t *pg; |
| 78 | int err = -ENOMEM; |
| 79 | |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 80 | /* Use upper 10 bits of VA to index the first level map */ |
Christophe Leroy | 0b1c524 | 2020-01-09 08:25:25 +0000 | [diff] [blame] | 81 | pd = pmd_ptr_k(va); |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 82 | /* Use middle 10 bits of VA to index the second-level map */ |
Christophe Leroy | 4a6d8cf | 2019-04-26 15:58:06 +0000 | [diff] [blame] | 83 | if (likely(slab_is_available())) |
| 84 | pg = pte_alloc_kernel(pd, va); |
| 85 | else |
| 86 | pg = early_pte_alloc_kernel(pd, va); |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 87 | if (pg != 0) { |
| 88 | err = 0; |
Benjamin Herrenschmidt | 3be4e69 | 2007-04-12 15:30:21 +1000 | [diff] [blame] | 89 | /* The PTE should never be already set nor present in the |
| 90 | * hash table |
| 91 | */ |
Christophe Leroy | 26973fa | 2018-10-09 13:51:56 +0000 | [diff] [blame] | 92 | BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot)); |
Christophe Leroy | c766ee7 | 2018-10-09 13:51:45 +0000 | [diff] [blame] | 93 | set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot)); |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 94 | } |
Scott Wood | 47ce8af | 2013-10-11 19:22:37 -0500 | [diff] [blame] | 95 | smp_wmb(); |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 96 | return err; |
| 97 | } |
| 98 | |
/*
 * Map the physical memory chunk [offset, top) into the kernel linear
 * mapping, one page at a time.  Both bounds are offsets from the start
 * of memory (memstart_addr).
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		/* Kernel text and init text pages must stay executable */
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
#ifdef CONFIG_PPC_BOOK3S_32
		/* Pre-fault text pages into the hash table on hash MMUs */
		if (ktext)
			hash_preload(&init_mm, v);
#endif
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}
| 123 | |
/*
 * Map every lowmem memblock memory region into the kernel linear mapping.
 */
void __init mapin_ram(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		phys_addr_t base = reg->base;
		/* Only lowmem can live in the linear mapping */
		phys_addr_t top = min(base + reg->size, total_lowmem);

		if (base >= top)
			continue;
		/* Give MMU-specific code first shot at the region;
		 * judging by the usage here it returns how far it already
		 * mapped, and the remainder is mapped page by page.
		 */
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}
| 138 | |
/*
 * Change the protection of one lowmem linear-mapping page, WITHOUT
 * flushing the TLB -- the caller is responsible for that.
 *
 * Returns 0 on success.  An address covered by a block mapping is left
 * untouched and still reported as success; -EINVAL means no PTE was
 * found for the address.
 */
static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	unsigned long address;

	/* Highmem pages have no permanent kernel mapping to modify */
	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	if (v_block_mapped(address))
		return 0;
	kpte = virt_to_kpte(address);
	if (!kpte)
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);

	return 0;
}
| 156 | |
/*
 * Change the page attributes of a range of pages in the linear mapping,
 * then flush the TLB for the affected range.
 *
 * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
 *
 * Returns 0 on success, or the first error from
 * __change_page_attr_noflush(), in which case pages already updated are
 * NOT rolled back.
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;
	struct page *start = page;

	/* NOTE(review): IRQs are disabled across the PTE rewrites,
	 * presumably so no interrupt on this CPU touches a page while its
	 * protection is in flux -- confirm before relaxing.
	 */
	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr_noflush(page, prot);
		if (err)
			break;
	}
	/* Make the PTE updates visible before re-enabling interrupts */
	wmb();
	local_irq_restore(flags);
	/* Flush the whole range at once; page now points one past the
	 * last page attempted.
	 */
	flush_tlb_kernel_range((unsigned long)page_address(start),
			       (unsigned long)page_address(page));
	return err;
}
| 180 | |
Christophe Leroy | 3184cc4 | 2017-08-02 15:51:03 +0200 | [diff] [blame] | 181 | void mark_initmem_nx(void) |
| 182 | { |
| 183 | struct page *page = virt_to_page(_sinittext); |
| 184 | unsigned long numpages = PFN_UP((unsigned long)_einittext) - |
| 185 | PFN_DOWN((unsigned long)_sinittext); |
Benjamin Herrenschmidt | 88df6e9 | 2007-04-12 15:30:22 +1000 | [diff] [blame] | 186 | |
Christophe Leroy | 4e3319c | 2020-05-19 05:48:59 +0000 | [diff] [blame] | 187 | if (v_block_mapped((unsigned long)_sinittext)) |
Christophe Leroy | 63b2bc6 | 2019-02-21 19:08:49 +0000 | [diff] [blame] | 188 | mmu_mark_initmem_nx(); |
| 189 | else |
| 190 | change_page_attr(page, numpages, PAGE_KERNEL); |
Christophe Leroy | 3184cc4 | 2017-08-02 15:51:03 +0200 | [diff] [blame] | 191 | } |
| 192 | |
#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * Make kernel text read-only+executable and rodata read-only, then check
 * that no writable+executable mappings remain.
 */
void mark_rodata_ro(void)
{
	struct page *page;
	unsigned long numpages;

	/* Block-mapped text: per-page attributes don't apply, delegate to
	 * the MMU-specific code and just verify W^X afterwards.
	 */
	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	/* Kernel text: read-only but still executable */
	page = virt_to_page(_stext);
	numpages = PFN_UP((unsigned long)_etext) -
		   PFN_DOWN((unsigned long)_stext);

	change_page_attr(page, numpages, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	page = virt_to_page(__start_rodata);
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)__start_rodata);

	change_page_attr(page, numpages, PAGE_KERNEL_RO);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif
| 224 | |
#ifdef CONFIG_DEBUG_PAGEALLOC
/*
 * DEBUG_PAGEALLOC hook: map pages back in (@enable true) or unmap them
 * (@enable false, __pgprot(0) clears all protection bits) so stray
 * accesses to freed pages fault immediately.
 *
 * Highmem pages have no permanent kernel mapping, so there is nothing
 * to change for them.
 */
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */