// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

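/*
 * Allocate pages from the buddy allocator once the slab allocator is up;
 * fall back to memblock for allocations during early boot.
 */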
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_phys_alloc(size, size);
}

static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(phys_to_page(addr))))
		return;
	free_pages(addr, order);
}

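/*
 * Allocate a region/segment (CRST) table and initialize all of its
 * entries with the given value, e.g. an empty region or segment entry.
 */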
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

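/*
 * Allocate a page table with all entries marked invalid. Uses the page
 * table allocator of init_mm once slab is up, memblock otherwise.
 */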
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_phys_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}

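/*
 * Marker byte for unused sub-PMD ranges of vmemmap pages: a vmemmap PMD
 * whose backing page consists entirely of PAGE_UNUSED bytes (checked
 * with memchr_inv()) contains no used struct pages and can be freed.
 */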
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED),
 * ranges from unused_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_pmd_start;

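/*
 * Apply the deferred memset(PAGE_UNUSED) for the range from
 * unused_pmd_start up to the next PMD_SIZE boundary.
 */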
static void vmemmap_flush_unused_pmd(void)
{
	if (!unused_pmd_start)
		return;
	memset(__va(unused_pmd_start), PAGE_UNUSED,
	       ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
	unused_pmd_start = 0;
}

static void __vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed (just in case the memmap never gets initialized,
	 * e.g., because the memory block never gets onlined).
	 */
	memset(__va(start), 0, sizeof(struct page));
}

static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_pmd_start == start) {
		unused_pmd_start = end;
		if (likely(IS_ALIGNED(unused_pmd_start, PMD_SIZE)))
			unused_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_pmd();
	__vmemmap_use_sub_pmd(start, end);
}

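/*
 * Mark the used range within a freshly populated vmemmap PMD and fill
 * the rest of the PMD page with PAGE_UNUSED. Marking the trailing
 * unused range is deferred via unused_pmd_start, so populating
 * consecutive sections can skip the memset.
 */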
static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));

	vmemmap_flush_unused_pmd();

	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
	__vmemmap_use_sub_pmd(start, end);

	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset(page, PAGE_UNUSED, start - __pa(page));
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD the last
	 * unused range in the populated PMD.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_pmd_start = end;
}

/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	void *page = __va(ALIGN_DOWN(start, PMD_SIZE));

	vmemmap_flush_unused_pmd();
	memset(__va(start), PAGE_UNUSED, end - start);
	return !memchr_inv(page, PAGE_UNUSED, PMD_SIZE);
}

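/*
 * Add (add=true) or remove (add=false) the PTE mappings for the range.
 * For the identity mapping (direct=true) physical addresses are mapped
 * 1:1; for the vmemmap, backing pages are allocated on add and freed
 * on remove.
 */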
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages(pfn_to_phys(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

				if (!new_page)
					goto out;
				pte_val(*pte) = __pa(new_page) | prot;
			} else {
				pte_val(*pte) = addr | prot;
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}

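/* Free the PTE table behind a PMD entry if all of its entries are empty. */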
static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free(__va(pmd_deref(*pmd)));
	pmd_clear(pmd);
}

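/*
 * Same as modify_pte_table(), one level up: with EDAT1, fully aligned
 * PMD_SIZE ranges are mapped with large (1 MB) segments instead of page
 * tables, and partially used vmemmap segments are tracked with
 * PAGE_UNUSED so they can be freed once completely unused.
 */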
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_large(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && addr && direct &&
			    !debug_pagealloc_enabled()) {
				pmd_val(*pmd) = addr | prot;
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would also end up
				 * with page tables, since vmemmap_populate gets
				 * called for each section separately.
				 */
				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
				if (new_page) {
					pmd_val(*pmd) = __pa(new_page) | prot;
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}

static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	const unsigned long end = start + PUD_SIZE;
	pmd_t *pmd;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif
	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
	pud_clear(pud);
}

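/*
 * The same pattern one level up: with EDAT2, fully aligned PUD_SIZE
 * ranges of the identity mapping are mapped with huge (2 GB) regions.
 */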
static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;
	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_large(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && addr && direct &&
			    !debug_pagealloc_enabled()) {
				pud_val(*pud) = addr | prot;
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	const unsigned long end = start + P4D_SIZE;
	pud_t *pud;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
	p4d_clear(p4d);
}

static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	const unsigned long end = start + PGDIR_SIZE;
	p4d_t *p4d;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
	pgd_clear(pgd);
}

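/*
 * Central worker for all page table updates: walk from the PGD down and
 * add (add=true) or remove (add=false) mappings for [start, end), either
 * for the identity mapping (direct=true) or the vmemmap (direct=false).
 * On removal, empty page tables are freed and the TLB is flushed for
 * the whole range.
 */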
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}

static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, true, direct);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, false, direct);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	return add_pagetable(start, start + size, true);
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	remove_pagetable(start, start + size, true);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false);
	if (ret)
		remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
}

void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	int ret;

	if (start + size > VMEM_MAX_PHYS ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory into virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end)
		vmem_add_range(base, end - base);
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);

	/* we need lowcore executable for our LPSWE instructions */
	set_memory_x(0, 1);

	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}