// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include <mm/mmu_decl.h>

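/* Physical start of RAM and physical address the kernel was loaded at. */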
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;
	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}

/*
 * vmemmap virtual address space management does not have a traditional
 * page table to track which virtual struct pages are backed by a physical
 * mapping. The virtual to physical mappings are tracked in a simple linked
 * list format. 'vmemmap_list' maintains the entire vmemmap physical mapping
 * at all times, whereas the 'next' list maintains the available
 * vmemmap_backing structures which have been deleted from the
 * 'vmemmap_list' during system runtime (memory hotplug remove operation).
 * The freed 'vmemmap_backing' structures are reused later when new
 * requests come in without allocating fresh memory. This pointer also
 * tracks the allocated 'vmemmap_backing' structures as we allocate one
 * full page of memory at a time when we don't have any.
 */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;

/*
 * The same pointer 'next' tracks individual chunks inside the allocated
 * full page during boot time and again tracks the freed nodes during
 * runtime. This is racy, but the two uses never overlap because they are
 * separated by the boot process. It would create problems if a memory
 * hotplug operation somehow happened during boot!
 */
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;
	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

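/*
 * Record the virt -> phys pairing for one vmemmap page on 'vmemmap_list'
 * so the backing memory can be found again at hot-remove time.
 */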
static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}

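/*
 * Create the vmemmap for [start, end): allocate backing memory for each
 * uninitialised vmemmap page (preferring the device altmap when one is
 * supplied), record it on 'vmemmap_list' and map it into the kernel.
 */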
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p = NULL;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		/*
		 * Allocate from the altmap first if we have one. This may
		 * fail due to alignment issues when using 16MB hugepages, so
		 * fall back to system memory if the altmap allocation fails.
		 */
		if (altmap)
			p = altmap_alloc_block_buf(page_size, altmap);
		if (!p)
			p = vmemmap_alloc_block_buf(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug(" * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warn("%s: Unable to create vmemmap mapping: %d\n",
				__func__, rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
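/*
 * Unlink the 'vmemmap_backing' entry for @start from 'vmemmap_list', push
 * it onto the free list headed by 'next' and return the physical address
 * it described, or 0 if no entry is found.
 */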
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make 'next' point at this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

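/*
 * Tear down the vmemmap for [start, end): for each vmemmap page no longer
 * overlapped by a valid section, hand the backing memory back to the
 * altmap, the bootmem reservation or the page allocator it came from,
 * then remove the kernel mapping.
 */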
void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);
	unsigned long alt_start = ~0, alt_end = ~0;
	unsigned long base_pfn;

	start = _ALIGN_DOWN(start, page_size);
	if (altmap) {
		alt_start = altmap->base_pfn;
		alt_end = altmap->base_pfn + altmap->reserve +
			  altmap->free + altmap->alloc + altmap->align;
	}

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct page *page;

		/*
		 * The section has already been marked as invalid, so
		 * vmemmap_populated() returning true means some other
		 * sections still map into this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

		page = pfn_to_page(addr >> PAGE_SHIFT);
		nr_pages = 1 << page_order;
		base_pfn = PHYS_PFN(addr);

		if (base_pfn >= alt_start && base_pfn < alt_end) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/*
				 * this shouldn't happen, but if it does,
				 * leave the memory there
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
#endif /* CONFIG_MEMORY_HOTPLUG */

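/* No-op on powerpc: no bootmem memmap registration is required here. */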
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_PPC_BOOK3S_64
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);

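/*
 * "disable_radix" (optionally taking a boolean argument) on the kernel
 * command line overrides the CONFIG_PPC_RADIX_MMU_DEFAULT default above.
 */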
static int __init parse_disable_radix(char *p)
{
	bool val;

	if (!p)
		val = true;
	else if (kstrtobool(p, &val))
		return -EINVAL;

	disable_radix = val;

	return 0;
}
early_param("disable_radix", parse_disable_radix);

/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix.  If not, we clear the radix feature bit so we fall back to hash.
 */
static void __init early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for supported configuration */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix - check enabled && GTSE */
		if (!early_radix_enabled()) {
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
		}
		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
						OV5_FEAT(OV5_RADIX_GTSE))) {
			pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
		}
		/* Do radix anyway - the hypervisor said we had to */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash - disable radix */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
	}
}

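/*
 * Decide between hash and radix at boot: apply the command-line override,
 * honour the hypervisor's constraints when running as a guest, then call
 * the chosen MMU's early device tree setup.
 */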
void __init mmu_early_init_devtree(void)
{
	/* Disable radix mode based on kernel command line. */
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

	/*
	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
	 * When running bare-metal, we can use radix if we like
	 * even though the ibm,architecture-vec-5 property created by
	 * skiboot doesn't have the necessary bits set.
	 */
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();

	if (early_radix_enabled())
		radix__early_init_devtree();
	else
		hash__early_init_devtree();
}
#endif /* CONFIG_PPC_BOOK3S_64 */