/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

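/*
 * Allocate 2^order pages: from memblock during early boot, from the
 * page allocator once the slab allocator is available.
 */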
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_alloc(size, size);
}

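/*
 * Allocate a region or segment table (crst) and initialize all of its
 * entries with the given value.
 */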
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

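/*
 * Allocate a page table and mark all of its entries as invalid.
 */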
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID, size);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pgt_prot, sgt_prot, r3_prot;
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	r3_prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
		r3_prot &= ~_REGION_ENTRY_NOEXEC;
	}
	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}
		pu_dir = pud_offset(p4_dir, address);
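		/*
		 * Map a full 2GB block with a region-third-table entry if the
		 * machine has EDAT2, the address is 2GB aligned, at least 2GB
		 * remain to be mapped, and debug_pagealloc is disabled.
		 */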
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | r3_prot;
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
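		/*
		 * Likewise, use a 1MB segment-table entry if the machine has
		 * EDAT1 and a full, aligned 1MB block remains to be mapped.
		 */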
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | sgt_prot;
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgt_prot;
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			address += P4D_SIZE;
			continue;
		}
		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long pgt_prot, sgt_prot;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}
	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for the vmemmap if available. We
			 * always use large frames, even if they are only
			 * partially used; otherwise we would also have to
			 * allocate page tables, since vmemmap_populate()
			 * gets called separately for each memory section. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

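/*
 * Freeing of vmemmap backing storage is not implemented; the mapping
 * is simply left in place.
 */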
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

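/*
 * Remove a segment from the segment list and invalidate its range in
 * the 1:1 mapping.
 */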
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

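/*
 * Remove a previously added memory mapping. The given range must match
 * an entry in the segment list exactly.
 */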
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

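/*
 * Add a memory segment to the segment list and create the
 * corresponding 1:1 mapping. On failure both are rolled back again.
 */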
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * Map the whole physical memory into the kernel address space
 * (identity mapping). Enough space is reserved in the vmalloc area for
 * the vmemmap so that additional memory segments can be hotplugged.
 */
void __init vmem_map_init(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
	__set_memory((unsigned long) _stext,
		     (_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long) _etext,
		     (_eshared - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long) _sinittext,
		     (_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	pr_info("Write protected kernel read-only data: %luk\n",
		(_eshared - _stext) >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);