// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

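/*
 * Page tables may have to be allocated both during early boot and at
 * runtime: the helpers below fall back to memblock while the slab
 * allocator is not yet available.
 */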
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return memblock_alloc(size, size);
}

static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
		return;
	free_pages(addr, order);
}

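/*
 * Allocate a CRST table (used for the region and segment table levels)
 * and preset all entries with the given value, e.g. the _ENTRY_EMPTY
 * pattern that matches the table level.
 */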
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}

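/*
 * The vmemmap is populated in PMD_SIZE (1 MB) blocks, while memory
 * sections may cover only part of such a block. Sub-PMD ranges that do
 * not back any struct pages yet are filled with the PAGE_UNUSED marker;
 * a PMD page consisting entirely of PAGE_UNUSED bytes can be freed
 * again on removal (see vmemmap_unuse_sub_pmd()).
 */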
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED),
 * ranges from unused_sub_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_sub_pmd_start;

static void vmemmap_flush_unused_sub_pmd(void)
{
	if (!unused_sub_pmd_start)
		return;
	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
	unused_sub_pmd_start = 0;
}

static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only a piece as used to block the memmap page
	 * from getting removed (just in case the memmap never gets
	 * initialized, e.g., because the memory block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}

static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_sub_pmd_start == start) {
		unused_sub_pmd_start = end;
		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
			unused_sub_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_sub_pmd();
	vmemmap_mark_sub_pmd_used(start, end);
}

static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();

	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
	vmemmap_mark_sub_pmd_used(start, end);

	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember the last unused range in the last
	 * populated PMD.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_sub_pmd_start = end;
}

/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	memset((void *)start, PAGE_UNUSED, end - start);
	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}

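/*
 * The modify_*_table() walkers below handle both adding and removing of
 * mappings, selected via "add". "direct" distinguishes the 1:1 direct
 * mapping from the vmemmap, which differ in how backing pages are
 * allocated and freed and in which direct-map page counters get updated.
 */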
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

				if (!new_page)
					goto out;
				pte_val(*pte) = __pa(new_page) | prot;
			} else {
				pte_val(*pte) = __pa(addr) | prot;
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}

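/*
 * The try_free_*_table() helpers free a lower-level table once all of
 * its entries have been cleared, so that removing memory also releases
 * the page tables that used to map it.
 */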
static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
}

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_large(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && addr && direct &&
			    !debug_pagealloc_enabled()) {
				pmd_val(*pmd) = __pa(addr) | prot;
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would also end up
				 * with page tables, since vmemmap_populate()
				 * gets called for each section separately.
				 */
				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
				if (new_page) {
					pmd_val(*pmd) = __pa(new_page) | prot;
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}

static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	const unsigned long end = start + PUD_SIZE;
	pmd_t *pmd;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif
	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
	pud_clear(pud);
}

static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;
	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_large(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && addr && direct &&
			    !debug_pagealloc_enabled()) {
				pud_val(*pud) = __pa(addr) | prot;
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	const unsigned long end = start + P4D_SIZE;
	pud_t *pud;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
	p4d_clear(p4d);
}

static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	const unsigned long end = start + PGDIR_SIZE;
	p4d_t *p4d;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && KASAN_SHADOW_START > end)
		return;
#endif

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
	pgd_clear(pgd);
}

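/*
 * Top-level walker: add or remove mappings for the page-aligned range
 * [start, end) and, on removal, flush the TLB for the whole range.
 */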
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}

static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, true, direct);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, false, direct);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	return add_pagetable(start, start + size, true);
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	remove_pagetable(start, start + size, true);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false);
	if (ret)
		remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
}

void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}

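/*
 * Memory hotplug is limited to the range that the identity mapping and
 * the vmemmap can cover, i.e. everything below VMEM_MAX_PHYS.
 */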
struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = VMEM_MAX_PHYS - 1;
	return mhp_range;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct range range = arch_get_mappable_range();
	int ret;

	if (start < range.start ||
	    start + size > range.end + 1 ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping);
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end)
		vmem_add_range(base, end - base);
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);

	/* we need lowcore executable for our LPSWE instructions */
	set_memory_x(0, 1);

	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}