// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/string_helpers.h>
#include <linux/memory.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/uaccess.h>
#include <asm/ultravisor.h>

#include <trace/events/thp.h>

unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;
unsigned int radix_mem_block_size __ro_after_init;

static __ref void *early_alloc_pgtable(unsigned long size, int nid,
			unsigned long region_start, unsigned long region_end)
{
	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
	void *ptr;

	if (region_start)
		min_addr = region_start;
	if (region_end)
		max_addr = region_end;

	ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
		      __func__, size, size, nid, &min_addr, &max_addr);

	return ptr;
}

/*
 * When allocating pud or pmd pointers, we allocate a complete page
 * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
 * is to ensure that the page obtained from the memblock allocator
 * can be used completely as a page table page and can be freed
 * correctly when the page table entries are removed.
 */
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	p4dp = p4d_offset(pgdp, ea);
	if (p4d_none(*p4dp)) {
		pudp = early_alloc_pgtable(PAGE_SIZE, nid,
					   region_start, region_end);
		p4d_populate(&init_mm, p4dp, pudp);
	}
	pudp = pud_offset(p4dp, ea);
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	if (pud_none(*pudp)) {
		pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
					   region_end);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	if (!pmd_present(*pmdp)) {
		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
					   region_start, region_end);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

#ifdef CONFIG_PPC_64K_PAGES
	BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
#endif

	if (unlikely(!slab_is_available()))
		return early_map_kernel_page(ea, pa, flags, map_page_size,
					     nid, region_start, region_end);

	/*
	 * Should make page table allocation functions be able to take a
	 * node, so we can place kernel page tables on the right nodes after
	 * boot.
	 */
	pgdp = pgd_offset_k(ea);
	p4dp = p4d_offset(pgdp, ea);
	pudp = pud_alloc(&init_mm, p4dp, ea);
	if (!pudp)
		return -ENOMEM;
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	pmdp = pmd_alloc(&init_mm, pudp, ea);
	if (!pmdp)
		return -ENOMEM;
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	ptep = pte_alloc_kernel(pmdp, ea);
	if (!ptep)
		return -ENOMEM;

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}
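
/*
 * Usage sketch (illustrative, not from the original file): a caller
 * mapping a single 2M kernel page at a suitably aligned ea/pa pair
 * would do something like
 *
 *	rc = radix__map_kernel_page(ea, pa, PAGE_KERNEL, PMD_SIZE);
 *
 * and get -ENOMEM back if an intermediate table could not be allocated.
 */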

#ifdef CONFIG_STRICT_KERNEL_RWX
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		p4dp = p4d_offset(pgdp, idx);
		pudp = pud_alloc(&init_mm, p4dp, idx);
		if (!pudp)
			continue;
		if (pud_is_leaf(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_is_leaf(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

Michael Ellerman | afb6d06 | 2018-10-17 23:53:38 +1100 | [diff] [blame] | 245 | static inline void __meminit |
| 246 | print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec) |
Reza Arbab | b5200ec | 2017-01-16 13:07:43 -0600 | [diff] [blame] | 247 | { |
Michael Ellerman | 6deb6b4 | 2017-08-30 17:41:17 +1000 | [diff] [blame] | 248 | char buf[10]; |
| 249 | |
Reza Arbab | b5200ec | 2017-01-16 13:07:43 -0600 | [diff] [blame] | 250 | if (end <= start) |
| 251 | return; |
| 252 | |
Michael Ellerman | 6deb6b4 | 2017-08-30 17:41:17 +1000 | [diff] [blame] | 253 | string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf)); |
| 254 | |
Michael Ellerman | afb6d06 | 2018-10-17 23:53:38 +1100 | [diff] [blame] | 255 | pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf, |
| 256 | exec ? " (exec)" : ""); |
Reza Arbab | b5200ec | 2017-01-16 13:07:43 -0600 | [diff] [blame] | 257 | } |
| 258 | |
Michael Ellerman | 232aa40 | 2018-08-14 22:37:32 +1000 | [diff] [blame] | 259 | static unsigned long next_boundary(unsigned long addr, unsigned long end) |
| 260 | { |
| 261 | #ifdef CONFIG_STRICT_KERNEL_RWX |
| 262 | if (addr < __pa_symbol(__init_begin)) |
| 263 | return __pa_symbol(__init_begin); |
| 264 | #endif |
| 265 | return end; |
| 266 | } |
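
/*
 * Worked example (illustrative addresses): with __pa_symbol(__init_begin)
 * at 0x2000000, a request covering 0x0-0x40000000 is cut at 0x2000000 so
 * that no large page straddles the boundary where the kernel text's
 * executable protection ends.
 */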

static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end,
					     unsigned long max_mapping_size,
					     int nid, pgprot_t _prot)
{
	unsigned long vaddr, addr, mapping_size = 0;
	bool prev_exec, exec = false;
	pgprot_t prot;
	int psize;

	start = ALIGN(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = next_boundary(addr, end) - addr;
		if (gap > max_mapping_size)
			gap = max_mapping_size;
		previous_size = mapping_size;
		prev_exec = exec;

		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift) {
			mapping_size = PUD_SIZE;
			psize = MMU_PAGE_1G;
		} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			   mmu_psize_defs[MMU_PAGE_2M].shift) {
			mapping_size = PMD_SIZE;
			psize = MMU_PAGE_2M;
		} else {
			mapping_size = PAGE_SIZE;
			psize = mmu_virtual_psize;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
			prot = PAGE_KERNEL_X;
			exec = true;
		} else {
			prot = _prot;
			exec = false;
		}

		if (mapping_size != previous_size || exec != prev_exec) {
			print_mapping(start, addr, previous_size, prev_exec);
			start = addr;
		}

		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
		if (rc)
			return rc;

		update_page_count(psize, 1);
	}

	print_mapping(start, addr, mapping_size, exec);
	return 0;
}
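
/*
 * Worked example (illustrative, ignoring the next_boundary() split for
 * kernel text): a 1G-aligned 2G range with max_mapping_size = 1G is
 * covered by two 1G pages when MMU_PAGE_1G is available; a range
 * starting at 0x200000 instead begins with 2M mappings (or base pages)
 * until a 1G-aligned boundary allows a larger page.
 */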

static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support SLB for radix */
	mmu_slb_size = 0;

	/*
	 * Create the linear mapping
	 */
	for_each_memblock(memory, reg) {
		/*
		 * The memblock allocator is up at this point, so the
		 * page tables will be allocated within the range. No
		 * need for a node (which we don't have yet).
		 */

		if ((reg->base + reg->size) >= RADIX_VMALLOC_START) {
			pr_warn("Outside the supported range\n");
			continue;
		}

		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size,
						radix_mem_block_size,
						-1, PAGE_KERNEL));
	}

	/* Find out how many PID bits are supported */
	if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
		mmu_base_pid = 1;
	} else if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		/*
		 * When KVM is possible, we only use the top half of the
		 * PID space to avoid collisions between host and guest PIDs
		 * which can cause problems due to prefetch when exiting the
		 * guest with AIL=3
		 */
		mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
		mmu_base_pid = 1;
#endif
	} else {
		/* The guest uses the bottom half of the PID space */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
		mmu_base_pid = 1;
	}

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);

	/*
	 * The init_mm context is given the first available (non-zero) PID,
	 * which is the "guard PID" and contains no page table. PIDR should
	 * never be set to zero because that duplicates the kernel address
	 * space at the 0x0... offset (quadrant 0)!
	 *
	 * An arbitrary PID that may later be allocated by the PID allocator
	 * for userspace processes must not be used either, because that
	 * would cause stale user mappings for that PID on CPUs outside of
	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
	 *
	 * So permanently carve out one PID for the purpose of a guard PID.
	 */
	init_mm.context.id = mmu_base_pid;
	mmu_base_pid++;
}

static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0, dw1;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
	mmu_partition_table_set_entry(0, dw0, dw1, false);

	pr_info("Initializing Radix MMU\n");
}

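/*
 * The shifts below are page size exponents in hex: 0xc = 12 (4K),
 * 0x10 = 16 (64K), 0x15 = 21 (2M) and 0x1e = 30 (1G).
 */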
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}
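
/*
 * Worked example (illustrative) for the decode in
 * radix_dt_scan_page_sizes() below: a cell value of 0xa0000010 gives
 * ap = 0xa0000010 >> 29 = 0x5 and
 * shift = 0xa0000010 & ~(0xe << 28) = 0x10, i.e. a 64K page with AP
 * encoding 0x5, matching the defaults set in radix__early_init_devtree().
 */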

static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

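/*
 * Find the memory block (LMB) size to use as the maximum linear mapping
 * size: 1 GiB (1UL * 1024 * 1024 * 1024) on OPAL/PowerNV and on kernels
 * without memory hotplug, otherwise the "ibm,lmb-size" property of the
 * "ibm,dynamic-reconfiguration-memory" node.
 */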
#ifdef CONFIG_MEMORY_HOTPLUG
static int __init probe_memory_block_size(unsigned long node, const char *uname,
					  int depth, void *data)
{
	unsigned long *mem_block_size = (unsigned long *)data;
	const __be64 *prop;
	int len;

	if (depth != 1)
		return 0;

	if (strcmp(uname, "ibm,dynamic-reconfiguration-memory"))
		return 0;

	prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(__be64))
		/*
		 * Nothing in the device tree
		 */
		*mem_block_size = MIN_MEMORY_BLOCK_SIZE;
	else
		*mem_block_size = be64_to_cpup(prop);
	return 1;
}

static unsigned long radix_memory_block_size(void)
{
	unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;

	/*
	 * OPAL firmware features are set by now, so it is safe to
	 * test for the OPAL feature.
	 */
	if (firmware_has_feature(FW_FEATURE_OPAL))
		mem_block_size = 1UL * 1024 * 1024 * 1024;
	else
		of_scan_flat_dt(probe_memory_block_size, &mem_block_size);

	return mem_block_size;
}

#else   /* CONFIG_MEMORY_HOTPLUG */

static unsigned long radix_memory_block_size(void)
{
	return 1UL * 1024 * 1024 * 1024;
}

#endif /* CONFIG_MEMORY_HOTPLUG */

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree.
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (!rc) {
		/*
		 * No page size details found in the device tree.
		 * Let's assume we have 4K and 64K page support.
		 */
		mmu_psize_defs[MMU_PAGE_4K].shift = 12;
		mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

		mmu_psize_defs[MMU_PAGE_64K].shift = 16;
		mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
	}

	/*
	 * Max mapping size used when mapping pages. We don't use
	 * ppc_md.memory_block_size() here because this gets called
	 * early, before the machine probe has run. Also, the pseries
	 * implementation only checks for ibm,lmb-size. All hypervisors
	 * supporting radix do expose that device tree node.
	 */
	radix_mem_block_size = radix_memory_block_size();
	return;
}

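/*
 * A note on the SPR constants used below: IBM bit numbering counts from
 * the most-significant bit, so IBM bit n of a 64-bit register is
 * (1ul << (63 - n)). Hence (3ul << 62) sets IBM bits 0-1 (the key 0
 * mask in AMOR) and (1ul << 62) sets IBM bit 1 (the key 0
 * instruction-fetch control in the IAMR).
 */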
static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can set up IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

#ifdef CONFIG_PPC_KUEP
void setup_kuep(bool disabled)
{
	if (disabled || !early_radix_enabled())
		return;

	if (smp_processor_id() == boot_cpuid)
		pr_info("Activating Kernel Userspace Execution Prevention\n");

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, (1ul << 62));
}
#endif

#ifdef CONFIG_PPC_KUAP
void setup_kuap(bool disabled)
{
	if (disabled || !early_radix_enabled())
		return;

	if (smp_processor_id() == boot_cpuid) {
		pr_info("Activating Kernel Userspace Access Prevention\n");
		cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_KUAP;
	}

	/* Make sure userspace can't change the AMR */
	mtspr(SPRN_UAMOR, 0);
	mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
	isync();
}
#endif

void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	} else
		mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	__kernel_io_end = RADIX_KERN_IO_END;
	vmemmap = (struct page *)RADIX_VMEMMAP_START;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif
	__pte_frag_nr = RADIX_PTE_FRAG_NR;
	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

	radix_init_pgtable();

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	/* Switch to the guard PID before turning on MMU */
	radix__switch_mmu_context(NULL, &init_mm);
	tlbiel_all();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * Update the partition table control register and UPRT.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		set_ptcr_when_no_uv(__pa(partition_tb) |
				    (PATB_SIZE_SHIFT - 12));

		radix_init_amor();
	}

	radix__switch_mmu_context(NULL, &init_mm);
	tlbiel_all();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		set_ptcr_when_no_uv(0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first memblock not mapping
	 * physical address 0 on these processors.
	 */
	BUG_ON(first_memblock_base != 0);

	/*
	 * Radix mode is not limited by RMA / VRMA addressing.
	 */
	ppc64_rma_size = ULONG_MAX;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, pud_start);
	p4d_clear(p4d);
}

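/*
 * The remove_*_table() walkers below mirror the mapping path: each level
 * clears leaf entries of its own size, recurses into non-leaf entries,
 * and then calls the matching free_*_table() helper above to reclaim a
 * table page once all of its slots are none.
 */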
static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_is_leaf(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}
			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_is_leaf(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}
			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

static void __meminit remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;
	p4d_t *p4d;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		p4d = p4d_offset(pgd, addr);
		if (!p4d_present(*p4d))
			continue;

		if (p4d_is_leaf(*p4d)) {
			if (!IS_ALIGNED(addr, P4D_SIZE) ||
			    !IS_ALIGNED(next, P4D_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)p4d_page_vaddr(*p4d);
		remove_pud_table(pud_base, addr, next);
		free_pud_table(pud_base, p4d);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __meminit radix__create_section_mapping(unsigned long start,
					    unsigned long end, int nid,
					    pgprot_t prot)
{
	if (end >= RADIX_VMALLOC_START) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	return create_physical_mapping(__pa(start), __pa(end),
				       radix_mem_block_size, nid, prot);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
				 pgprot_t flags, unsigned int map_page_size,
				 int nid)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}

int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
	int ret;

	if ((start + page_size) >= RADIX_VMEMMAP_END) {
		pr_warn("Outside the supported range\n");
		return -1;
	}

	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
	BUG_ON(ret);

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmdp)

{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for a normal pmd.
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/*
	 * pmdp_collapse_flush needs to ensure that there are no parallel
	 * GUP walks after this call. This is needed so that we can have a
	 * stable page ref count when collapsing a page. We don't allow
	 * collapsing a page on which GUP has taken a reference. We can
	 * ensure that by sending an IPI, because GUP walks happen with
	 * IRQs disabled.
	 */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area (the first two PTE slots, since a
 * list_head is two pointers).
 */
| 1027 | void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, |
| 1028 | pgtable_t pgtable) |
| 1029 | { |
Christophe Leroy | 47d9994 | 2019-03-29 10:00:00 +0000 | [diff] [blame] | 1030 | struct list_head *lh = (struct list_head *) pgtable; |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1031 | |
Christophe Leroy | 47d9994 | 2019-03-29 10:00:00 +0000 | [diff] [blame] | 1032 | assert_spin_locked(pmd_lockptr(mm, pmdp)); |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1033 | |
Christophe Leroy | 47d9994 | 2019-03-29 10:00:00 +0000 | [diff] [blame] | 1034 | /* FIFO */ |
| 1035 | if (!pmd_huge_pte(mm, pmdp)) |
| 1036 | INIT_LIST_HEAD(lh); |
| 1037 | else |
| 1038 | list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp)); |
| 1039 | pmd_huge_pte(mm, pmdp) = pgtable; |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1040 | } |
| 1041 | |
| 1042 | pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) |
| 1043 | { |
Christophe Leroy | 47d9994 | 2019-03-29 10:00:00 +0000 | [diff] [blame] | 1044 | pte_t *ptep; |
| 1045 | pgtable_t pgtable; |
| 1046 | struct list_head *lh; |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1047 | |
Christophe Leroy | 47d9994 | 2019-03-29 10:00:00 +0000 | [diff] [blame] | 1048 | assert_spin_locked(pmd_lockptr(mm, pmdp)); |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1049 | |
Christophe Leroy | 47d9994 | 2019-03-29 10:00:00 +0000 | [diff] [blame] | 1050 | /* FIFO */ |
| 1051 | pgtable = pmd_huge_pte(mm, pmdp); |
| 1052 | lh = (struct list_head *) pgtable; |
| 1053 | if (list_empty(lh)) |
| 1054 | pmd_huge_pte(mm, pmdp) = NULL; |
| 1055 | else { |
| 1056 | pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next; |
| 1057 | list_del(lh); |
| 1058 | } |
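 | | /* Zero the two pte slots the list_head occupied (see comment above). */ |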
| 1059 | ptep = (pte_t *) pgtable; |
| 1060 | *ptep = __pte(0); |
| 1061 | ptep++; |
| 1062 | *ptep = __pte(0); |
| 1063 | return pgtable; |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1064 | } |
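 | | /* |
 | | * A minimal usage sketch (hypothetical caller, not kernel code): the |
 | | * THP code deposits a spare page table when it installs a huge pmd |
 | | * and withdraws it again when the huge pmd is split or zapped, always |
 | | * under the pmd lock: |
 | | * |
 | | *	spin_lock(pmd_lockptr(mm, pmdp)); |
 | | *	radix__pgtable_trans_huge_deposit(mm, pmdp, pgtable); |
 | | *	...					// huge pmd in use |
 | | *	pgtable = radix__pgtable_trans_huge_withdraw(mm, pmdp); |
 | | *	spin_unlock(pmd_lockptr(mm, pmdp)); |
 | | */ |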
| 1065 | |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1066 | pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm, |
Christophe Leroy | 47d9994 | 2019-03-29 10:00:00 +0000 | [diff] [blame] | 1067 | unsigned long addr, pmd_t *pmdp) |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1068 | { |
| 1069 | pmd_t old_pmd; |
| 1070 | unsigned long old; |
| 1071 | |
| 1072 | old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0); |
| 1073 | old_pmd = __pmd(old); |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1074 | return old_pmd; |
| 1075 | } |
| 1076 | |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 1077 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
Aneesh Kumar K.V | 044003b | 2018-05-29 19:58:39 +0530 | [diff] [blame] | 1078 | |
Aneesh Kumar K.V | e4c1112 | 2018-05-29 19:58:40 +0530 | [diff] [blame] | 1079 | void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, |
| 1080 | pte_t entry, unsigned long address, int psize) |
Aneesh Kumar K.V | 044003b | 2018-05-29 19:58:39 +0530 | [diff] [blame] | 1081 | { |
Aneesh Kumar K.V | e4c1112 | 2018-05-29 19:58:40 +0530 | [diff] [blame] | 1082 | struct mm_struct *mm = vma->vm_mm; |
Aneesh Kumar K.V | 044003b | 2018-05-29 19:58:39 +0530 | [diff] [blame] | 1083 | unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | |
| 1084 | _PAGE_RW | _PAGE_EXEC); |
Aneesh Kumar K.V | f08d08f | 2018-08-22 22:46:05 +0530 | [diff] [blame] | 1085 | |
| 1086 | unsigned long change = pte_val(entry) ^ pte_val(*ptep); |
Aneesh Kumar K.V | bd5050e | 2018-05-29 19:58:41 +0530 | [diff] [blame] | 1087 | /* |
| 1088 | * To avoid an NMMU hang while relaxing access, we need to mark |
| 1089 | * the pte invalid in between. |
| 1090 | */ |
Aneesh Kumar K.V | f08d08f | 2018-08-22 22:46:05 +0530 | [diff] [blame] | 1091 | if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) { |
Aneesh Kumar K.V | 044003b | 2018-05-29 19:58:39 +0530 | [diff] [blame] | 1092 | unsigned long old_pte, new_pte; |
| 1093 | |
Aneesh Kumar K.V | f08d08f | 2018-08-22 22:46:05 +0530 | [diff] [blame] | 1094 | old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID); |
Aneesh Kumar K.V | 044003b | 2018-05-29 19:58:39 +0530 | [diff] [blame] | 1095 | /* |
| 1096 | * New value of the pte: the old value with the relaxed bits set. |
| 1097 | */ |
| 1098 | new_pte = old_pte | set; |
Aneesh Kumar K.V | bd5050e | 2018-05-29 19:58:41 +0530 | [diff] [blame] | 1099 | radix__flush_tlb_page_psize(mm, address, psize); |
Aneesh Kumar K.V | f08d08f | 2018-08-22 22:46:05 +0530 | [diff] [blame] | 1100 | __radix_pte_update(ptep, _PAGE_INVALID, new_pte); |
Aneesh Kumar K.V | bd5050e | 2018-05-29 19:58:41 +0530 | [diff] [blame] | 1101 | } else { |
Aneesh Kumar K.V | 044003b | 2018-05-29 19:58:39 +0530 | [diff] [blame] | 1102 | __radix_pte_update(ptep, 0, set); |
Nicholas Piggin | e5f7cb5 | 2018-06-01 20:01:15 +1000 | [diff] [blame] | 1103 | /* |
| 1104 | * Book3S does not require a TLB flush when relaxing access |
| 1105 | * restrictions when the address space is not attached to an |
| 1106 | * NMMU, because the core MMU will reload the pte after taking |
| 1107 | * an access fault, which is defined by the architecture. |
| 1108 | */ |
Aneesh Kumar K.V | bd5050e | 2018-05-29 19:58:41 +0530 | [diff] [blame] | 1109 | } |
Nicholas Piggin | f1cb8f9 | 2018-06-01 20:01:19 +1000 | [diff] [blame] | 1110 | /* See ptesync comment in radix__set_pte_at */ |
Aneesh Kumar K.V | 044003b | 2018-05-29 19:58:39 +0530 | [diff] [blame] | 1111 | } |
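 | | /* |
 | | * The copro path above, shown as a timeline (illustrative): |
 | | * |
 | | *	1. pte is valid with the old, stricter permissions |
 | | *	2. pte is made invalid (_PAGE_PRESENT clear, _PAGE_INVALID set) |
 | | *	3. the TLB is flushed, so the NMMU drops any cached translation |
 | | *	4. pte is made valid again with the new, relaxed permissions |
 | | * |
 | | * A coprocessor can therefore never observe a translation that mixes |
 | | * the old and new permissions. |
 | | */ |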
Aneesh Kumar K.V | 5b32336 | 2019-03-05 15:46:33 -0800 | [diff] [blame] | 1112 | |
| 1113 | void radix__ptep_modify_prot_commit(struct vm_area_struct *vma, |
| 1114 | unsigned long addr, pte_t *ptep, |
| 1115 | pte_t old_pte, pte_t pte) |
| 1116 | { |
| 1117 | struct mm_struct *mm = vma->vm_mm; |
| 1118 | |
| 1119 | /* |
| 1120 | * To avoid an NMMU hang while relaxing access, we need to flush the TLB |
| 1121 | * before we set the new value. We need to do this only for radix, because |
| 1122 | * hash translation already flushes when updating the Linux pte. |
| 1123 | */ |
| 1124 | if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) && |
| 1125 | (atomic_read(&mm->context.copros) > 0)) |
| 1126 | radix__flush_tlb_page(vma, addr); |
| 1127 | |
| 1128 | set_pte_at(mm, addr, ptep, pte); |
| 1129 | } |
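 | | /* |
 | | * A hedged sketch of the expected caller pattern (mirroring the |
 | | * generic mprotect path, which dispatches to the radix hook above; |
 | | * illustrative only): |
 | | * |
 | | *	old_pte = ptep_modify_prot_start(vma, addr, ptep);	// clears the pte |
 | | *	new_pte = pte_modify(old_pte, newprot); |
 | | *	ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte); |
 | | * |
 | | * so the flush above runs while the pte is still clear, before the |
 | | * relaxed value becomes visible to the NMMU. |
 | | */ |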
Nicholas Piggin | d38153f | 2019-06-10 13:08:17 +1000 | [diff] [blame] | 1130 | |
Nicholas Piggin | d909f91 | 2019-06-10 13:08:18 +1000 | [diff] [blame] | 1131 | int __init arch_ioremap_pud_supported(void) |
| 1132 | { |
| 1133 | /* HPT does not cope with large pages in the vmalloc area */ |
| 1134 | return radix_enabled(); |
| 1135 | } |
| 1136 | |
| 1137 | int __init arch_ioremap_pmd_supported(void) |
| 1138 | { |
| 1139 | return radix_enabled(); |
| 1140 | } |
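 | | /* |
 | | * A hedged sketch of the consumer of these hooks (mirroring the |
 | | * generic ioremap code; illustrative only): ioremap_page_range() |
 | | * tries the largest page size first, and only when the arch opted in |
 | | * and the range is suitably sized and aligned: |
 | | * |
 | | *	if (ioremap_pud_enabled() && |
 | | *	    ((addr | phys_addr) & ~PUD_MASK) == 0 && |
 | | *	    size >= PUD_SIZE && |
 | | *	    pud_set_huge(pud, phys_addr, prot)) |
 | | *		return;		// mapped at the pud level, no pmd/pte |
 | | */ |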
| 1141 | |
| 1142 | int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) |
| 1143 | { |
| 1144 | return 0; |
| 1145 | } |
| 1146 | |
| 1147 | int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) |
| 1148 | { |
| 1149 | pte_t *ptep = (pte_t *)pud; |
| 1150 | pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot); |
| 1151 | |
| 1152 | if (!radix_enabled()) |
| 1153 | return 0; |
| 1154 | |
| 1155 | set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud); |
| 1156 | |
| 1157 | return 1; |
| 1158 | } |
| 1159 | |
| 1160 | int pud_clear_huge(pud_t *pud) |
| 1161 | { |
| 1162 | if (pud_huge(*pud)) { |
| 1163 | pud_clear(pud); |
| 1164 | return 1; |
| 1165 | } |
| 1166 | |
| 1167 | return 0; |
| 1168 | } |
| 1169 | |
| 1170 | int pud_free_pmd_page(pud_t *pud, unsigned long addr) |
| 1171 | { |
| 1172 | pmd_t *pmd; |
| 1173 | int i; |
| 1174 | |
| 1175 | pmd = (pmd_t *)pud_page_vaddr(*pud); |
| 1176 | pud_clear(pud); |
| 1177 | |
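 | | /* Flush before freeing: no CPU may still be walking the old pmd table. */ |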
| 1178 | flush_tlb_kernel_range(addr, addr + PUD_SIZE); |
| 1179 | |
| 1180 | for (i = 0; i < PTRS_PER_PMD; i++) { |
| 1181 | if (!pmd_none(pmd[i])) { |
| 1182 | pte_t *pte; |
| 1183 | pte = (pte_t *)pmd_page_vaddr(pmd[i]); |
| 1184 | |
| 1185 | pte_free_kernel(&init_mm, pte); |
| 1186 | } |
| 1187 | } |
| 1188 | |
| 1189 | pmd_free(&init_mm, pmd); |
| 1190 | |
| 1191 | return 1; |
| 1192 | } |
| 1193 | |
| 1194 | int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot) |
| 1195 | { |
| 1196 | pte_t *ptep = (pte_t *)pmd; |
| 1197 | pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot); |
| 1198 | |
| 1199 | if (!radix_enabled()) |
| 1200 | return 0; |
| 1201 | |
| 1202 | set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd); |
| 1203 | |
| 1204 | return 1; |
| 1205 | } |
| 1206 | |
| 1207 | int pmd_clear_huge(pmd_t *pmd) |
| 1208 | { |
| 1209 | if (pmd_huge(*pmd)) { |
| 1210 | pmd_clear(pmd); |
| 1211 | return 1; |
| 1212 | } |
| 1213 | |
| 1214 | return 0; |
| 1215 | } |
| 1216 | |
| 1217 | int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) |
| 1218 | { |
| 1219 | pte_t *pte; |
| 1220 | |
| 1221 | pte = (pte_t *)pmd_page_vaddr(*pmd); |
| 1222 | pmd_clear(pmd); |
| 1223 | |
| 1224 | flush_tlb_kernel_range(addr, addr + PMD_SIZE); |
| 1225 | |
| 1226 | pte_free_kernel(&init_mm, pte); |
| 1227 | |
| 1228 | return 1; |
| 1229 | } |
| 1230 | |
Anshuman Khandual | 0f472d0 | 2019-07-16 16:27:33 -0700 | [diff] [blame] | 1231 | int __init arch_ioremap_p4d_supported(void) |
| 1232 | { |
| 1233 | return 0; |
| 1234 | } |