// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

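/*
 * Allocate pages for a page table. Early during boot, before the buddy
 * allocator is available, fall back to memblock; __ref allows the
 * reference to the __init memblock allocator.
 */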
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_phys_alloc(size, size);
}

static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(phys_to_page(addr))))
		return;
	free_pages(addr, order);
}

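/*
 * Allocate a region or segment table (crst) and preset all of its
 * entries with @val.
 */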
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

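/*
 * Allocate a page table with all entries preset to invalid; use the
 * page table allocator of the kernel address space once slab is up,
 * and memblock before that.
 */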
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_phys_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}

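/*
 * Add (@add) or remove page table entries for the given range, either for
 * the identity (direct) mapping or for the vmemmap, depending on @direct.
 * When removing vmemmap entries, the backing pages are freed as well.
 */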
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages(pfn_to_phys(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block(PAGE_SIZE,
								     NUMA_NO_NODE);

				if (!new_page)
					goto out;
				pte_val(*pte) = __pa(new_page) | prot;
			} else {
				pte_val(*pte) = addr | prot;
			}
		} else {
			continue;
		}

		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}

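/*
 * Free the page table linked into @pmd if all of its entries are empty.
 */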
static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}

	vmem_pte_free(__va(pmd_deref(*pmd)));
	pmd_clear(pmd);
}

/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_large(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd),
								get_order(PMD_SIZE));
					pmd_clear(pmd);
					pages++;
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && addr && direct &&
			    !debug_pagealloc_enabled()) {
				pmd_val(*pmd) = addr | prot;
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would also end
				 * up allocating page tables, since
				 * vmemmap_populate() gets called for each
				 * section separately.
				 */
				new_page = vmemmap_alloc_block(PMD_SIZE,
							       NUMA_NO_NODE);
				if (!new_page)
					goto out;
				pmd_val(*pmd) = __pa(new_page) | prot;
				continue;
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			continue;
		}

		ret = modify_pte_table(pmd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}

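/*
 * Free the segment (pmd) table linked into @pud if all of its entries are
 * empty. Tables that also cover areas outside of the 1:1 mapping and the
 * vmemmap, such as the vmalloc area or the KASAN shadow, are left alone.
 */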
static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	const unsigned long end = start + PUD_SIZE;
	pmd_t *pmd;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
		return;
#endif

	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
		if (!pmd_none(*pmd))
			return;
	}

	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
	pud_clear(pud);
}

static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;

	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_large(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && addr && direct &&
			    !debug_pagealloc_enabled()) {
				pud_val(*pud) = addr | prot;
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}

		ret = modify_pmd_table(pud, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}

static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	const unsigned long end = start + P4D_SIZE;
	pud_t *pud;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
		return;
#endif

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}

	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
	p4d_clear(p4d);
}

static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);

		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}

		ret = modify_pud_table(p4d, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}

static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	const unsigned long end = start + PGDIR_SIZE;
	p4d_t *p4d;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
		return;
#endif

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}

	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
	pgd_clear(pgd);
}

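/*
 * Walk the kernel page tables for the given range and either populate
 * (@add) or unmap it, for the identity mapping (@direct) or the vmemmap.
 * On removal, empty intermediate tables are freed and the TLB is flushed
 * for the whole range afterwards.
 */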
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}

		ret = modify_p4d_table(pgd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}

static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, true, direct);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, false, direct);
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	return add_pagetable(start, start + size, true);
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	remove_pagetable(start, start + size, true);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false);
	if (ret)
		remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
	return ret;
}

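/*
 * Remove a virtual mem_map range, freeing the pages that back it.
 */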
void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
}

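/*
 * Tear down a 1:1 mapping previously established with vmem_add_mapping().
 */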
void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}

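/*
 * Establish a 1:1 mapping for a physical memory range, used for example
 * when DCSS segments are loaded. A caller would roughly do (sketch only;
 * seg_start/seg_size are hypothetical placeholders for the segment's
 * physical start address and size):
 *
 *	rc = vmem_add_mapping(seg_start, seg_size);
 *	if (rc)
 *		return rc;
 *	...
 *	vmem_remove_mapping(seg_start, seg_size);
 */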
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	int ret;

	if (start + size > VMEM_MAX_PHYS ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for the vmemmap to be able
 * to hot-plug additional memory segments.
 */
void __init vmem_map_init(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_range(reg->base, reg->size);
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);

	/* we need lowcore executable for our LPSWE instructions */
	set_memory_x(0, 1);

	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}