/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/string_helpers.h>
#include <linux/stop_machine.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>

unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;

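/*
 * Install partition table entry 0 for the host (the bare metal case).
 * patb0, the translation config, is left as previously set up; patb1
 * points the hardware at the host process table, with PATB_GR set to
 * mark it as a radix process table.
 */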
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					 unsigned long table_size)
{
	unsigned long patb0, patb1;

	patb0 = be64_to_cpu(partition_tb[0].patb0);
	patb1 = base | table_size | PATB_GR;

	mmu_partition_table_set_entry(0, patb0, patb1);

	return 0;
}

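/*
 * Boot-time page table allocation, for use before the slab allocator is
 * up. Memory comes straight from memblock; nid, region_start and
 * region_end are only placement hints.
 */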
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
			unsigned long region_start, unsigned long region_end)
{
	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
	void *ptr;

	if (region_start)
		min_addr = region_start;
	if (region_end)
		max_addr = region_end;

	ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
		      __func__, size, size, nid, &min_addr, &max_addr);

	return ptr;
}

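/*
 * Early variant of radix__map_kernel_page(): walk the kernel page table
 * for 'ea', allocating any missing levels from memblock, then install a
 * PTE at the PUD, PMD or PTE level depending on map_page_size.
 */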
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(ea);
	if (pgd_none(*pgdp)) {
		pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
						region_start, region_end);
		pgd_populate(&init_mm, pgdp, pudp);
	}
	pudp = pud_offset(pgdp, ea);
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	if (pud_none(*pudp)) {
		pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid,
						region_start, region_end);
		pud_populate(&init_mm, pudp, pmdp);
	}
	pmdp = pmd_offset(pudp, ea);
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	if (!pmd_present(*pmdp)) {
		ptep = early_alloc_pgtable(PAGE_SIZE, nid,
						region_start, region_end);
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	ptep = pte_offset_kernel(pmdp, ea);

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

/*
 * nid, region_start, and region_end are hints to try to place the page
 * table memory in the same node or region.
 */
static int __map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size,
			  int nid,
			  unsigned long region_start, unsigned long region_end)
{
	unsigned long pfn = pa >> PAGE_SHIFT;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);

	if (unlikely(!slab_is_available()))
		return early_map_kernel_page(ea, pa, flags, map_page_size,
						nid, region_start, region_end);

	/*
	 * Should make page table allocation functions be able to take a
	 * node, so we can place kernel page tables on the right nodes after
	 * boot.
	 */
	pgdp = pgd_offset_k(ea);
	pudp = pud_alloc(&init_mm, pgdp, ea);
	if (!pudp)
		return -ENOMEM;
	if (map_page_size == PUD_SIZE) {
		ptep = (pte_t *)pudp;
		goto set_the_pte;
	}
	pmdp = pmd_alloc(&init_mm, pudp, ea);
	if (!pmdp)
		return -ENOMEM;
	if (map_page_size == PMD_SIZE) {
		ptep = pmdp_ptep(pmdp);
		goto set_the_pte;
	}
	ptep = pte_alloc_kernel(pmdp, ea);
	if (!ptep)
		return -ENOMEM;

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
	smp_wmb();
	return 0;
}

int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
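/*
 * Clear the given pte bits (e.g. _PAGE_WRITE or _PAGE_EXEC) on every page
 * in [start, end). The linear mapping is already populated here, so the
 * pXd_alloc() calls below normally just walk down to the existing ptes.
 */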
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); // aligns up

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_huge(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_huge(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit
print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
{
	char buf[10];

	if (end <= start)
		return;

	string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));

	pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
		exec ? " (exec)" : "");
}

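/*
 * With STRICT_KERNEL_RWX, __init_begin must fall on a mapping boundary so
 * that text/rodata never shares a large page with the writable memory
 * after it; cap each mapping at that address.
 */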
static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
	if (addr < __pa_symbol(__init_begin))
		return __pa_symbol(__init_begin);
#endif
	return end;
}

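/*
 * Map a physical range into the kernel linear mapping, using the largest
 * page size the alignment and the gap to the next boundary allow (1G,
 * then 2M, then the base page size). Ranges overlapping kernel text or
 * the interrupt vectors are mapped PAGE_KERNEL_X, everything else
 * PAGE_KERNEL.
 */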
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end,
					     int nid)
{
	unsigned long vaddr, addr, mapping_size = 0;
	bool prev_exec, exec = false;
	pgprot_t prot;
	int psize;

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = next_boundary(addr, end) - addr;
		previous_size = mapping_size;
		prev_exec = exec;

		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift) {
			mapping_size = PUD_SIZE;
			psize = MMU_PAGE_1G;
		} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			   mmu_psize_defs[MMU_PAGE_2M].shift) {
			mapping_size = PMD_SIZE;
			psize = MMU_PAGE_2M;
		} else {
			mapping_size = PAGE_SIZE;
			psize = mmu_virtual_psize;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
			prot = PAGE_KERNEL_X;
			exec = true;
		} else {
			prot = PAGE_KERNEL;
			exec = false;
		}

		if (mapping_size != previous_size || exec != prev_exec) {
			print_mapping(start, addr, previous_size, prev_exec);
			start = addr;
		}

		rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
		if (rc)
			return rc;

		update_page_count(psize, 1);
	}

	print_mapping(start, addr, mapping_size, exec);
	return 0;
}

void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support SLB for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg) {
		/*
		 * The memblock allocator is up at this point, so the
		 * page tables will be allocated within the range. No
		 * need for a node (which we don't have yet).
		 */
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size,
						-1));
	}

	/* Find out how many PID bits are supported */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		/*
		 * When KVM is possible, we only use the top half of the
		 * PID space to avoid collisions between host and guest PIDs
		 * which can cause problems due to prefetch when exiting the
		 * guest with AIL=3
		 */
		mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
		mmu_base_pid = 1;
#endif
	} else {
		/* The guest uses the bottom half of the PID space */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
		mmu_base_pid = 1;
	}

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * enables us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
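	/*
	 * Flush process-scoped translations and the page walk cache for
	 * LPID 0, so nothing stale survives from before the new process
	 * table was registered (tlbie with RIC=2, PRS=1, R=1).
	 */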
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);

	/*
	 * The init_mm context is given the first available (non-zero) PID,
	 * which is the "guard PID" and contains no page table. PIDR should
	 * never be set to zero because that duplicates the kernel address
	 * space at the 0x0... offset (quadrant 0)!
	 *
	 * An arbitrary PID that may later be allocated by the PID allocator
	 * for userspace processes must not be used either, because that
	 * would cause stale user mappings for that PID on CPUs outside of
	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
	 *
	 * So permanently carve out one PID for the purpose of a guard PID.
	 */
	init_mm.context.id = mmu_base_pid;
	mmu_base_pid++;
}

static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

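/*
 * Parse the "ibm,processor-radix-AP-encodings" property: one 32-bit cell
 * per supported page size, with the AP (actual page size) encoding in the
 * top 3 bits and the page shift in the remaining bits.
 */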
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * Let's assume we have 4K and 64K page support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

#ifdef CONFIG_PPC_KUEP
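/*
 * Kernel Userspace Execution Prevention: block instruction fetches from
 * user memory while in the kernel, by denying execute permission through
 * IAMR key 0.
 */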
void setup_kuep(bool disabled)
{
	if (disabled || !early_radix_enabled())
		return;

	if (smp_processor_id() == boot_cpuid)
		pr_info("Activating Kernel Userspace Execution Prevention\n");

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, (1ul << 62));
}
#endif

void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif
	__pte_frag_nr = RADIX_PTE_FRAG_NR;
	__pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
	__pmd_frag_nr = RADIX_PMD_FRAG_NR;
	__pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_pgtable();
	/* Switch to the guard PID before turning on MMU */
	radix__switch_mmu_context(NULL, &init_mm);
	if (cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_all();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}

	radix__switch_mmu_context(NULL, &init_mm);
	if (cpu_has_feature(CPU_FTR_HVMODE))
		tlbiel_all();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/*
	 * Radix mode is not limited by RMA / VRMA addressing.
	 */
	ppc64_rma_size = ULONG_MAX;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

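/*
 * Splitting a huge page in the linear mapping means clearing its pte and
 * remapping the unaligned ends with smaller pages. Other CPUs must not
 * be running through the affected range while that happens, so the remap
 * is done under stop_machine().
 */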
struct change_mapping_params {
	pte_t *pte;
	unsigned long start;
	unsigned long end;
	unsigned long aligned_start;
	unsigned long aligned_end;
};

static int __meminit stop_machine_change_mapping(void *data)
{
	struct change_mapping_params *params =
		(struct change_mapping_params *)data;

	if (!data)
		return -1;

	spin_unlock(&init_mm.page_table_lock);
	pte_clear(&init_mm, params->aligned_start, params->pte);
	create_physical_mapping(params->aligned_start, params->start, -1);
	create_physical_mapping(params->end, params->aligned_end, -1);
	spin_lock(&init_mm.page_table_lock);
	return 0;
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

/*
 * Helper to clear the pte and potentially split the mapping
 */
static void __meminit split_kernel_mapping(unsigned long addr, unsigned long end,
		unsigned long size, pte_t *pte)
{
	unsigned long mask = ~(size - 1);
	unsigned long aligned_start = addr & mask;
	unsigned long aligned_end = addr + size;
	struct change_mapping_params params;
	bool split_region = false;

	if ((end - addr) < size) {
		/*
		 * We're going to clear the PTE, but we have not yet flushed
		 * the mapping, so it is time to remap and flush. If the
		 * effects are visible outside the processor, or if we are
		 * running in code close to the mapping we cleared, we are
		 * in trouble.
		 */
		if (overlaps_kernel_text(aligned_start, addr) ||
			overlaps_kernel_text(end, aligned_end)) {
			/*
			 * Hack, just return, don't pte_clear
			 */
			WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
				  "text, not splitting\n", addr, end);
			return;
		}
		split_region = true;
	}

	if (split_region) {
		params.pte = pte;
		params.start = addr;
		params.end = end;
		params.aligned_start = addr & ~(size - 1);
		params.aligned_end = min_t(unsigned long, aligned_end,
				(unsigned long)__va(memblock_end_of_DRAM()));
		stop_machine(stop_machine_change_mapping, &params, NULL);
		return;
	}

	pte_clear(&init_mm, addr, pte);
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

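/*
 * Tear down the kernel mapping for [start, end): clear the ptes (splitting
 * huge mappings at unaligned edges), free page table pages that become
 * empty, then flush the TLB for the whole range.
 */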
static void __meminit remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return create_physical_mapping(start, end, nid);
}

int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
				 pgprot_t flags, unsigned int map_page_size,
				 int nid)
{
	return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
}

int __meminit radix__vmemmap_create_mapping(unsigned long start,
				      unsigned long page_size,
				      unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;
	int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
	int ret;

	ret = __map_kernel_page_nid(start, phys, __pgprot(flags), page_size, nid);
	BUG_ON(ret);

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(pmd_lockptr(mm, pmdp));
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

| 935 | pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, |
| 936 | pmd_t *pmdp) |
| 937 | |
| 938 | { |
| 939 | pmd_t pmd; |
| 940 | |
| 941 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); |
| 942 | VM_BUG_ON(radix__pmd_trans_huge(*pmdp)); |
Oliver O'Halloran | ebd3119 | 2017-06-28 11:32:34 +1000 | [diff] [blame] | 943 | VM_BUG_ON(pmd_devmap(*pmdp)); |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 944 | /* |
| 945 | * khugepaged calls this for normal pmd |
| 946 | */ |
| 947 | pmd = *pmdp; |
| 948 | pmd_clear(pmdp); |
Benjamin Herrenschmidt | 424de9c | 2017-07-19 14:49:06 +1000 | [diff] [blame] | 949 | |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 950 | /*FIXME!! Verify whether we need this kick below */ |
Aneesh Kumar K.V | fa4531f | 2017-07-27 11:54:54 +0530 | [diff] [blame] | 951 | serialize_against_pte_lookup(vma->vm_mm); |
Benjamin Herrenschmidt | 424de9c | 2017-07-19 14:49:06 +1000 | [diff] [blame] | 952 | |
| 953 | radix__flush_tlb_collapsed_pmd(vma->vm_mm, address); |
| 954 | |
Aneesh Kumar K.V | bde3eb6 | 2016-04-29 23:26:30 +1000 | [diff] [blame] | 955 | return pmd; |
| 956 | } |
| 957 | |
/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				 pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}


pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_current_mm_pte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);
	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix 2M at PMD level means thp */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
				  pte_t entry, unsigned long address, int psize)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
					      _PAGE_RW | _PAGE_EXEC);

	unsigned long change = pte_val(entry) ^ pte_val(*ptep);
	/*
	 * To avoid NMMU hang while relaxing access, we need to mark
	 * the pte invalid in between.
	 */
	if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
		unsigned long old_pte, new_pte;

		old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
		/*
		 * new value of pte
		 */
		new_pte = old_pte | set;
		radix__flush_tlb_page_psize(mm, address, psize);
		__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
	} else {
		__radix_pte_update(ptep, 0, set);
		/*
		 * Book3S does not require a TLB flush when relaxing access
		 * restrictions when the address space is not attached to a
		 * NMMU, because the core MMU will reload the pte after taking
		 * an access fault, which is defined by the architecture.
		 */
	}
	/* See ptesync comment in radix__set_pte_at */
}

void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep,
				    pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * To avoid NMMU hang while relaxing access we need to flush the tlb before
	 * we set the new value. We need to do this only for radix, because hash
	 * translation does flush when updating the linux pte.
	 */
	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    (atomic_read(&mm->context.copros) > 0))
		radix__flush_tlb_page(vma, addr);

	set_pte_at(mm, addr, ptep, pte);
}