// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>

/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot, and readonly for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and readonly after that */
unsigned long long highmem;
EXPORT_SYMBOL(highmem);
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

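/*
 * Called once the early memblock allocator has done its job: map and
 * release the area between the end of the kernel brk and uml_reserved,
 * hand all low memory to the page allocator, and flag kmalloc() as
 * usable from here on.
 */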
void __init mem_init(void)
{
	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Map in the area just after the brk now that kmalloc is about
	 * to be turned on.
	 */
	brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
	memblock_free(__pa(brk_end), uml_reserved - brk_end);
	uml_reserved = brk_end;

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	max_low_pfn = totalram_pages();
	max_pfn = max_low_pfn;
	mem_init_print_info(NULL);
	kmalloc_ok = 1;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
							  PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		set_pmd(pmd, __pmd(_KERNPG_TABLE +
				   (unsigned long) __pa(pte)));
		if (pte != pte_offset_kernel(pmd, 0))
			BUG();
	}
}

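/*
 * With three-level page tables, allocate a pmd page and hook it into the
 * given pud entry.  With two-level page tables the pmd is folded and this
 * is a no-op.
 */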
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
	pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pmd_table)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
	if (pmd_table != pmd_offset(pud, 0))
		BUG();
#endif
}

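/*
 * Pre-build the page table structure (pmd and pte pages) covering the
 * range [start, end).  Only the structure is created here; the actual
 * fixmap entries are installed later via set_fixmap().
 */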
static void __init fixrange_init(unsigned long start, unsigned long end,
				 pgd_t *pgd_base)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i, j;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		p4d = p4d_offset(pgd, vaddr);
		pud = pud_offset(p4d, vaddr);
		if (pud_none(*pud))
			one_md_table_init(pud);
		pmd = pmd_offset(pud, vaddr);
		for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
			one_page_table_init(pmd);
			vaddr += PMD_SIZE;
		}
		j = 0;
	}
}

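/*
 * When reusing the host's vsyscall area, copy its contents into kernel
 * memory and map that copy read-only at the original user-visible
 * addresses (FIXADDR_USER_START ... FIXADDR_USER_END).
 */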
static void __init fixaddr_user_init(void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
	long size = FIXADDR_USER_END - FIXADDR_USER_START;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	phys_t p;
	unsigned long v, vaddr = FIXADDR_USER_START;

	if (!size)
		return;

	fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
	v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
	if (!v)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, size, PAGE_SIZE);

	memcpy((void *) v, (void *) FIXADDR_USER_START, size);
	p = __pa(v);
	for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
	      p += PAGE_SIZE) {
		pgd = swapper_pg_dir + pgd_index(vaddr);
		p4d = p4d_offset(pgd, vaddr);
		pud = pud_offset(p4d, vaddr);
		pmd = pmd_offset(pud, vaddr);
		pte = pte_offset_kernel(pmd, vaddr);
		pte_set_val(*pte, p, PAGE_READONLY);
	}
#endif
}

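/*
 * Allocate the zero page, describe the zone layout (all memory sits in
 * ZONE_NORMAL) and create the page table structure backing the fixmap
 * range.
 */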
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES], vaddr;
	int i;

	empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
							       PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	for (i = 0; i < ARRAY_SIZE(zones_size); i++)
		zones_size[i] = 0;

	zones_size[ZONE_NORMAL] = (end_iomem >> PAGE_SHIFT) -
		(uml_physmem >> PAGE_SHIFT);
	free_area_init(zones_size);

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

	fixaddr_user_init();
}

/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */

void free_initmem(void)
{
}

/* Allocate and free page tables. */

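/*
 * A new pgd gets a zeroed user half, while the kernel half is copied from
 * swapper_pg_dir so that kernel mappings are shared by every address space.
 */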
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

	if (pgd) {
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD,
		       swapper_pg_dir + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	free_page((unsigned long) pgd);
}

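/*
 * With three-level page tables the pmd level is a page of its own; hand
 * out a zeroed page for it.  Two-level configurations fold the pmd away
 * and do not need this helper.
 */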
#ifdef CONFIG_3_LEVEL_PGTABLES
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);

	if (pmd)
		memset(pmd, 0, PAGE_SIZE);

	return pmd;
}
#endif

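/*
 * Thin wrapper around kmalloc() taking plain ints, intended for UML code
 * that is built against host headers and cannot include the kernel's slab
 * interfaces directly.
 */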
void *uml_kmalloc(int size, int flags)
{
	return kmalloc(size, flags);
}