// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

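/*
 * Allocate pages from the buddy allocator once the slab allocator is
 * available, directly from memblock during early boot.
 */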
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_phys_alloc(size, size);
}

static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(phys_to_page(addr))))
		return;
	free_pages(addr, order);
}

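/* Allocate a CRST (region or segment table) and initialize all entries to @val. */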
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

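/* Allocate a page table and initialize all entries as invalid. */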
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_phys_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages(pfn_to_phys(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block(PAGE_SIZE,
								     NUMA_NO_NODE);

				if (!new_page)
					goto out;
				pte_val(*pte) = __pa(new_page) | prot;
			} else
				pte_val(*pte) = addr | prot;
		} else
			continue;

		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}

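/*
 * Free the page table behind @pmd if all of its entries turned out to be
 * empty, and clear the pmd entry.
 */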
static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++)
		if (!pte_none(*pte))
			return;

	vmem_pte_free(__va(pmd_deref(*pmd)));
	pmd_clear(pmd);
}

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_large(*pmd) && !add) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd),
								get_order(PMD_SIZE));
					pmd_clear(pmd);
					pages++;
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && addr && direct &&
			    !debug_pagealloc_enabled()) {
				pmd_val(*pmd) = addr | prot;
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would also have
				 * page tables since vmemmap_populate gets
				 * called for each section separately.
				 */
				new_page = vmemmap_alloc_block(PMD_SIZE,
							       NUMA_NO_NODE);
				if (!new_page)
					goto out;
				pmd_val(*pmd) = __pa(new_page) | prot;
				continue;
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd))
			continue;

		ret = modify_pte_table(pmd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}

static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	const unsigned long end = start + PUD_SIZE;
	pmd_t *pmd;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif

	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;

	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
	pud_clear(pud);
}

static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;

	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_large(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && addr && direct &&
			    !debug_pagealloc_enabled()) {
				pud_val(*pud) = addr | prot;
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud))
			continue;

		ret = modify_pmd_table(pud, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	const unsigned long end = start + P4D_SIZE;
	pud_t *pud;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++)
		if (!pud_none(*pud))
			return;

	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
	p4d_clear(p4d);
}

static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);

		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			/* Link the new pud table, mirroring the other levels. */
			p4d_populate(&init_mm, p4d, pud);
		}

		ret = modify_pud_table(p4d, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	const unsigned long end = start + PGDIR_SIZE;
	p4d_t *p4d;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++)
		if (!p4d_none(*p4d))
			return;

	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
	pgd_clear(pgd);
}

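/*
 * Add (@add) or remove page table entries for the range, walking the kernel
 * page tables top down. With @direct, the range is part of the 1:1 (direct)
 * mapping and addresses map to themselves, using large pages where possible;
 * without it, the range belongs to the vmemmap and backing pages are
 * allocated or freed as needed.
 */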
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}

		ret = modify_p4d_table(pgd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}

static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, true, direct);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, false, direct);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	return add_pagetable(start, start + size, true);
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	remove_pagetable(start, start + size, true);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false);
	if (ret)
		remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
	return ret;
}

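/*
 * Remove a previously populated mem_map array from the virtual mem_map
 * array again and free page tables that became empty.
 */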
void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
}

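/*
 * Remove a physical memory range from the 1:1 mapping again, serialized
 * against concurrent mapping changes by vmem_mutex.
 */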
void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}

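/*
 * Add a physical memory range to the 1:1 mapping (used, e.g., for DCSS
 * segments). Fails with -ERANGE if the range would exceed VMEM_MAX_PHYS.
 */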
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	int ret;

	if (start + size > VMEM_MAX_PHYS ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_range(reg->base, reg->size);
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);

	/* we need lowcore executable for our LPSWE instructions */
	set_memory_x(0, 1);

	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}