/*
 * Page table handling routines for radix page table.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "radix-mmu: " fmt

#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
#include <asm/trace.h>

#include <trace/events/thp.h>

unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;

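/*
 * Bare-metal hook for register_process_table: install the process-table
 * base, its size encoding and PATB_GR into partition-table entry 0,
 * keeping the existing radix root (patb0) intact.
 */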
static int native_register_process_table(unsigned long base, unsigned long pg_sz,
					 unsigned long table_size)
{
	unsigned long patb0, patb1;

	patb0 = be64_to_cpu(partition_tb[0].patb0);
	patb1 = base | table_size | PATB_GR;

	mmu_partition_table_set_entry(0, patb0, patb1);

	return 0;
}

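/*
 * Early page-table allocator: returns a zeroed block from memblock,
 * naturally aligned to its own size, for use before slab is available.
 */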
static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE));
	memset(pt, 0, size);

	return pt;
}

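/*
 * Map a single kernel page at effective address 'ea' to physical 'pa',
 * at 4K, 2M (PMD) or 1G (PUD) granularity. Uses the normal page-table
 * allocators once slab is up, and early memblock allocations otherwise.
 */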
int radix__map_kernel_page(unsigned long ea, unsigned long pa,
			  pgprot_t flags,
			  unsigned int map_page_size)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * Make sure task size is correct as per the max addr
	 */
	BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
	} else {
		pgdp = pgd_offset_k(ea);
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
		pudp = pud_offset(pgdp, ea);
		if (map_page_size == PUD_SIZE) {
			ptep = (pte_t *)pudp;
			goto set_the_pte;
		}
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (map_page_size == PMD_SIZE) {
			ptep = pmdp_ptep(pmdp);
			goto set_the_pte;
		}
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
	}

set_the_pte:
	set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, flags));
	smp_wmb();
	return 0;
}

#ifdef CONFIG_STRICT_KERNEL_RWX
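/*
 * Clear the given PTE bits (e.g. _PAGE_WRITE or _PAGE_EXEC) on every
 * page in [start, end), then flush the kernel TLB for the range.
 */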
void radix__change_memory_range(unsigned long start, unsigned long end,
				unsigned long clear)
{
	unsigned long idx;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	start = ALIGN_DOWN(start, PAGE_SIZE);
	end = PAGE_ALIGN(end); /* aligns up */

	pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
		 start, end, clear);

	for (idx = start; idx < end; idx += PAGE_SIZE) {
		pgdp = pgd_offset_k(idx);
		pudp = pud_alloc(&init_mm, pgdp, idx);
		if (!pudp)
			continue;
		if (pud_huge(*pudp)) {
			ptep = (pte_t *)pudp;
			goto update_the_pte;
		}
		pmdp = pmd_alloc(&init_mm, pudp, idx);
		if (!pmdp)
			continue;
		if (pmd_huge(*pmdp)) {
			ptep = pmdp_ptep(pmdp);
			goto update_the_pte;
		}
		ptep = pte_alloc_kernel(pmdp, idx);
		if (!ptep)
			continue;
update_the_pte:
		radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
	}

	radix__flush_tlb_kernel_range(start, end);
}

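/*
 * Make the region from _stext to __init_begin read-only by clearing
 * _PAGE_WRITE on every page mapping it.
 */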
void radix__mark_rodata_ro(void)
{
	unsigned long start, end;

	start = (unsigned long)_stext;
	end = (unsigned long)__init_begin;

	radix__change_memory_range(start, end, _PAGE_WRITE);
}

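/* Strip execute permission (_PAGE_EXEC) from the __init sections. */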
void radix__mark_initmem_nx(void)
{
	unsigned long start = (unsigned long)__init_begin;
	unsigned long end = (unsigned long)__init_end;

	radix__change_memory_range(start, end, _PAGE_EXEC);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

static inline void __meminit print_mapping(unsigned long start,
					   unsigned long end,
					   unsigned long size)
{
	if (end <= start)
		return;

	pr_info("Mapped range 0x%lx - 0x%lx with 0x%lx\n", start, end, size);
}

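/*
 * Map a physical range into the kernel linear mapping, preferring 1G
 * then 2M pages where alignment, the remaining gap and the supported
 * page sizes allow. With STRICT_KERNEL_RWX, mappings that straddle the
 * kernel text boundary are split down so text can get its own
 * permissions.
 */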
static int __meminit create_physical_mapping(unsigned long start,
					     unsigned long end)
{
	unsigned long vaddr, addr, mapping_size = 0;
	pgprot_t prot;
	unsigned long max_mapping_size;
#ifdef CONFIG_STRICT_KERNEL_RWX
	int split_text_mapping = 1;
#else
	int split_text_mapping = 0;
#endif

	start = _ALIGN_UP(start, PAGE_SIZE);
	for (addr = start; addr < end; addr += mapping_size) {
		unsigned long gap, previous_size;
		int rc;

		gap = end - addr;
		previous_size = mapping_size;
		max_mapping_size = PUD_SIZE;

retry:
		if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
		    mmu_psize_defs[MMU_PAGE_1G].shift &&
		    PUD_SIZE <= max_mapping_size)
			mapping_size = PUD_SIZE;
		else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
			 mmu_psize_defs[MMU_PAGE_2M].shift)
			mapping_size = PMD_SIZE;
		else
			mapping_size = PAGE_SIZE;

		if (split_text_mapping && (mapping_size == PUD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext)) {
			max_mapping_size = PMD_SIZE;
			goto retry;
		}

		if (split_text_mapping && (mapping_size == PMD_SIZE) &&
		    (addr <= __pa_symbol(__init_begin)) &&
		    (addr + mapping_size) >= __pa_symbol(_stext))
			mapping_size = PAGE_SIZE;

		if (mapping_size != previous_size) {
			print_mapping(start, addr, previous_size);
			start = addr;
		}

		vaddr = (unsigned long)__va(addr);

		if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
		    overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
			prot = PAGE_KERNEL_X;
		else
			prot = PAGE_KERNEL;

		rc = radix__map_kernel_page(vaddr, addr, prot, mapping_size);
		if (rc)
			return rc;
	}

	print_mapping(start, addr, mapping_size);
	return 0;
}

static void __init radix_init_pgtable(void)
{
	unsigned long rts_field;
	struct memblock_region *reg;

	/* We don't support slb for radix */
	mmu_slb_size = 0;
	/*
	 * Create the linear mapping, using standard page size for now
	 */
	for_each_memblock(memory, reg)
		WARN_ON(create_physical_mapping(reg->base,
						reg->base + reg->size));

	/* Find out how many PID bits are supported */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (!mmu_pid_bits)
			mmu_pid_bits = 20;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		/*
		 * When KVM is possible, we only use the top half of the
		 * PID space to avoid collisions between host and guest PIDs
		 * which can cause problems due to prefetch when exiting the
		 * guest with AIL=3
		 */
		mmu_base_pid = 1 << (mmu_pid_bits - 1);
#else
		mmu_base_pid = 1;
#endif
	} else {
		/* The guest uses the bottom half of the PID space */
		if (!mmu_pid_bits)
			mmu_pid_bits = 19;
		mmu_base_pid = 1;
	}

	/*
	 * Allocate Partition table and process table for the
	 * host.
	 */
	BUG_ON(PRTB_SIZE_SHIFT > 36);
	process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
	/*
	 * Fill in the process table.
	 */
	rts_field = radix__get_tree_size();
	process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
	/*
	 * Fill in the partition table. We are supposed to use the effective
	 * address of the process table here, but our linear mapping also
	 * enables us to use the physical address.
	 */
	register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
	pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
	asm volatile("ptesync" : : : "memory");
	asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
}

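/*
 * Set up partition-table entry 0 for bare metal: the radix root, PGD
 * size encoding and PATB_HR go in the first doubleword; the process
 * table pointer (second doubleword) is filled in later via
 * register_process_table().
 */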
static void __init radix_init_partition_table(void)
{
	unsigned long rts_field, dw0;

	mmu_partition_table_init();
	rts_field = radix__get_tree_size();
	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
	mmu_partition_table_set_entry(0, dw0, 0);

	pr_info("Initializing Radix MMU\n");
	pr_info("Partition table %p\n", partition_tb);
}

void __init radix_init_native(void)
{
	register_process_table = native_register_process_table;
}

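/* Translate a radix page-size shift (4K/64K/2M/1G) to an MMU_PAGE_* index. */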
static int __init get_idx_from_shift(unsigned int shift)
{
	int idx = -1;

	switch (shift) {
	case 0xc:
		idx = MMU_PAGE_4K;
		break;
	case 0x10:
		idx = MMU_PAGE_64K;
		break;
	case 0x15:
		idx = MMU_PAGE_2M;
		break;
	case 0x1e:
		idx = MMU_PAGE_1G;
		break;
	}
	return idx;
}

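/*
 * Flat device-tree scan: pick up "ibm,mmu-pid-bits" and decode the
 * "ibm,processor-radix-AP-encodings" property into mmu_psize_defs.
 */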
static int __init radix_dt_scan_page_sizes(unsigned long node,
					   const char *uname, int depth,
					   void *data)
{
	int size = 0;
	int shift, idx;
	unsigned int ap;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU PID size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	/* Grab page size encodings */
	prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
	if (!prop)
		return 0;

	pr_info("Page sizes from device-tree:\n");
	for (; size >= 4; size -= 4, ++prop) {

		struct mmu_psize_def *def;

		/* top 3 bits are the AP encoding */
		shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
		ap = be32_to_cpu(prop[0]) >> 29;
		pr_info("Page size shift = %d AP=0x%x\n", shift, ap);

		idx = get_idx_from_shift(shift);
		if (idx < 0)
			continue;

		def = &mmu_psize_defs[idx];
		def->shift = shift;
		def->ap = ap;
	}

	/* needed ? */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
	return 1;
}

void __init radix__early_init_devtree(void)
{
	int rc;

	/*
	 * Try to find the available page sizes in the device-tree
	 */
	rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
	if (rc != 0) /* Found */
		goto found;
	/*
	 * Let's assume we have 4K and 64K page support
	 */
	mmu_psize_defs[MMU_PAGE_4K].shift = 12;
	mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;

	mmu_psize_defs[MMU_PAGE_64K].shift = 16;
	mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
found:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	if (mmu_psize_defs[MMU_PAGE_2M].shift) {
		/*
		 * map vmemmap using 2M if available
		 */
		mmu_vmemmap_psize = MMU_PAGE_2M;
	}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
	return;
}

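/*
 * POWER9 DD1 workaround: invalidate the TLB (for both prs values)
 * before setting HID0_POWER9_RADIX, then spin until the HID0 change
 * takes effect.
 */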
static void update_hid_for_radix(void)
{
	unsigned long hid0;
	unsigned long rb = 3UL << PPC_BITLSHIFT(53); /* IS = 3 */

	asm volatile("ptesync": : :"memory");
	/* prs = 0, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(0), "i"(2), "r"(0) : "memory");
	/* prs = 1, ric = 2, rs = 0, r = 1 is = 3 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(1), "i"(1), "i"(2), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync; isync; slbia": : :"memory");
	trace_tlbie(0, 0, rb, 0, 2, 0, 1);
	trace_tlbie(0, 0, rb, 0, 2, 1, 1);

	/*
	 * now switch the HID
	 */
	hid0 = mfspr(SPRN_HID0);
	hid0 |= HID0_POWER9_RADIX;
	mtspr(SPRN_HID0, hid0);
	asm volatile("isync": : :"memory");

	/* Wait for it to happen */
	while (!(mfspr(SPRN_HID0) & HID0_POWER9_RADIX))
		cpu_relax();
}

static void radix_init_amor(void)
{
	/*
	 * In HV mode, we init AMOR (Authority Mask Override Register) so that
	 * the hypervisor and guest can setup IAMR (Instruction Authority Mask
	 * Register), enable key 0 and set it to 1.
	 *
	 * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
	 */
	mtspr(SPRN_AMOR, (3ul << 62));
}

static void radix_init_iamr(void)
{
	unsigned long iamr;

	/*
	 * The IAMR should be set to 0 on DD1.
	 */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		iamr = 0;
	else
		iamr = (1ul << 62);

	/*
	 * Radix always uses key0 of the IAMR to determine if an access is
	 * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
	 * fetch.
	 */
	mtspr(SPRN_IAMR, iamr);
}

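/*
 * Boot-CPU MMU setup: select the linear and vmemmap page sizes, plug
 * the radix geometry into the generic page-table constants, then (on
 * bare metal) enable UPRT/HR in the LPCR and initialize the partition
 * table before building the kernel page tables.
 */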
void __init radix__early_init_mmu(void)
{
	unsigned long lpcr;

#ifdef CONFIG_PPC_64K_PAGES
	/* PAGE_SIZE mappings */
	mmu_virtual_psize = MMU_PAGE_64K;
#else
	mmu_virtual_psize = MMU_PAGE_4K;
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* vmemmap mapping */
	mmu_vmemmap_psize = mmu_virtual_psize;
#endif
	/*
	 * initialize page table size
	 */
	__pte_index_size = RADIX_PTE_INDEX_SIZE;
	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;
	__pgd_table_size = RADIX_PGD_TABLE_SIZE;

	__pmd_val_bits = RADIX_PMD_VAL_BITS;
	__pud_val_bits = RADIX_PUD_VAL_BITS;
	__pgd_val_bits = RADIX_PGD_VAL_BITS;

	__kernel_virt_start = RADIX_KERN_VIRT_START;
	__kernel_virt_size = RADIX_KERN_VIRT_SIZE;
	__vmalloc_start = RADIX_VMALLOC_START;
	__vmalloc_end = RADIX_VMALLOC_END;
	__kernel_io_start = RADIX_KERN_IO_START;
	vmemmap = (struct page *)RADIX_VMEMMAP_BASE;
	ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PCI
	pci_io_base = ISA_IO_BASE;
#endif

	/*
	 * For now radix also uses the same frag size
	 */
	__pte_frag_nr = H_PTE_FRAG_NR;
	__pte_frag_size_shift = H_PTE_FRAG_SIZE_SHIFT;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		radix_init_native();
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
		radix_init_partition_table();
		radix_init_amor();
	} else {
		radix_init_pseries();
	}

	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);

	radix_init_iamr();
	radix_init_pgtable();
}

void radix__early_init_mmu_secondary(void)
{
	unsigned long lpcr;
	/*
	 * update partition table control register and UPRT
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR)) {

		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			update_hid_for_radix();

		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);

		mtspr(SPRN_PTCR,
		      __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
		radix_init_amor();
	}
	radix_init_iamr();
}

void radix__mmu_cleanup_all(void)
{
	unsigned long lpcr;

	if (!firmware_has_feature(FW_FEATURE_LPAR)) {
		lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
		mtspr(SPRN_PTCR, 0);
		powernv_set_nmmu_ptcr(0);
		radix__flush_tlb_all();
	}
}

void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors.
	 */
	BUG_ON(first_memblock_base != 0);
	/*
	 * We limit the allocations that depend on ppc64_rma_size
	 * to first_memblock_size. We also clamp it to 1GB to
	 * avoid some funky things such as RTAS bugs.
	 *
	 * On radix config we really don't have a limitation
	 * on real mode access. But keeping it as above works
	 * well enough.
	 */
	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
	/*
	 * Finally limit subsequent allocations. We really don't want
	 * to limit the memblock allocations to rma_size. FIXME!! should
	 * we even limit at all ?
	 */
	memblock_set_current_limit(first_memblock_base + first_memblock_size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
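/*
 * Memory-hotunplug helpers: free a PTE or PMD level table once every
 * entry in it is clear, and unhook it from the level above.
 */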
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, pte_start);
	pmd_clear(pmd);
}

static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, pmd_start);
	pud_clear(pud);
}

static void remove_pte_table(pte_t *pte_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(next)) {
			/*
			 * The vmemmap_free() and remove_section_mapping()
			 * codepaths call us with aligned addresses.
			 */
			WARN_ONCE(1, "%s: unaligned range\n", __func__);
			continue;
		}

		pte_clear(&init_mm, addr, pte);
	}
}

static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pte_t *pte_base;
	pmd_t *pmd;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_huge(*pmd)) {
			if (!IS_ALIGNED(addr, PMD_SIZE) ||
			    !IS_ALIGNED(next, PMD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pmd);
			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next);
		free_pte_table(pte_base, pmd);
	}
}

static void remove_pud_table(pud_t *pud_start, unsigned long addr,
			     unsigned long end)
{
	unsigned long next;
	pmd_t *pmd_base;
	pud_t *pud;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_huge(*pud)) {
			if (!IS_ALIGNED(addr, PUD_SIZE) ||
			    !IS_ALIGNED(next, PUD_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pud);
			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next);
		free_pmd_table(pmd_base, pud);
	}
}

static void remove_pagetable(unsigned long start, unsigned long end)
{
	unsigned long addr, next;
	pud_t *pud_base;
	pgd_t *pgd;

	spin_lock(&init_mm.page_table_lock);

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (pgd_huge(*pgd)) {
			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
			    !IS_ALIGNED(next, PGDIR_SIZE)) {
				WARN_ONCE(1, "%s: unaligned range\n", __func__);
				continue;
			}

			pte_clear(&init_mm, addr, (pte_t *)pgd);
			continue;
		}

		pud_base = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud_base, addr, next);
	}

	spin_unlock(&init_mm.page_table_lock);
	radix__flush_tlb_kernel_range(start, end);
}

int __ref radix__create_section_mapping(unsigned long start, unsigned long end)
{
	return create_physical_mapping(start, end);
}

int radix__remove_section_mapping(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end);
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
int __meminit radix__vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys)
{
	/* Create a PTE encoding */
	unsigned long flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW;

	BUG_ON(radix__map_kernel_page(start, phys, __pgprot(flags), page_size));
	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
void radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
{
	remove_pagetable(start, start + page_size);
}
#endif
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

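/*
 * Clear/set bits on a huge-page PMD in place and return the old value;
 * the DEBUG_VM build asserts that mm->page_table_lock is held and that
 * the PMD really is a huge (or devmap) entry.
 */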
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
					 pmd_t *pmdp, unsigned long clr,
					 unsigned long set)
{
	unsigned long old;

#ifdef CONFIG_DEBUG_VM
	WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
	assert_spin_locked(&mm->page_table_lock);
#endif

	old = radix__pte_update(mm, addr, (pte_t *)pmdp, clr, set, 1);
	trace_hugepage_update(addr, old, clr, set);

	return old;
}

pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
				 pmd_t *pmdp)
{
	pmd_t pmd;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
	VM_BUG_ON(pmd_devmap(*pmdp));
	/*
	 * khugepaged calls this for normal pmd
	 */
	pmd = *pmdp;
	pmd_clear(pmdp);

	/*FIXME!! Verify whether we need this kick below */
	serialize_against_pte_lookup(vma->vm_mm);

	radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);

	return pmd;
}

/*
 * For us pgtable_t is pte_t *. In order to save the deposited
 * page table, we consider the allocated page table as a list
 * head. On withdraw we need to make sure we zero out the used
 * list_head memory area.
 */
void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	pte_t *ptep;
	pgtable_t pgtable;
	struct list_head *lh;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	*ptep = __pte(0);
	ptep++;
	*ptep = __pte(0);
	return pgtable;
}

pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp)
{
	pmd_t old_pmd;
	unsigned long old;

	old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
	old_pmd = __pmd(old);
	/*
	 * Serialize against find_current_mm_pte which does lock-less
	 * lookup in page tables with local interrupts disabled. For huge pages
	 * it casts pmd_t to pte_t. Since format of pte_t is different from
	 * pmd_t we want to prevent transit from pmd pointing to page table
	 * to pmd pointing to huge page (and back) while interrupts are disabled.
	 * We clear pmd to possibly replace it with page table pointer in
	 * different code paths. So make sure we wait for the parallel
	 * find_current_mm_pte to finish.
	 */
	serialize_against_pte_lookup(mm);
	return old_pmd;
}

int radix__has_transparent_hugepage(void)
{
	/* For radix, 2M at PMD level means THP */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */