/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>

#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since these pages are never written to after initialization
 * we don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

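/*
 * Consumers pick a colour via ZERO_PAGE(vaddr) (see asm/pgtable.h), which
 * offsets into this block by (vaddr & zero_page_mask).
 */
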
/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

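/*
 * Map @page at a fixmap slot whose cache colour matches @addr, installing a
 * wired TLB entry by hand so the mapping survives until kunmap_coherent().
 * Interrupt context uses a separate bank of FIX_N_COLOURS slots so it cannot
 * clobber a mapping set up in process context.
 */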
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned int uninitialized_var(old_mmid);
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	preempt_disable();
	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
	entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
#ifdef CONFIG_XPA
	if (cpu_has_xpa) {
		entrylo = (pte.pte_low & _PFNX_MASK);
		writex_c0_entrylo0(entrylo);
		writex_c0_entrylo1(entrylo);
	}
#endif
	tlbidx = num_wired_entries();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	local_irq_restore(flags);

	return (void *)vaddr;
}

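/*
 * kmap_coherent() maps @page with a cache colour matching the user mapping
 * at @addr; kmap_noncoherent() maps it with the noncoherent cache attribute
 * (PAGE_KERNEL_NC).  A minimal usage sketch:
 *
 *	void *vto = kmap_coherent(page, uvaddr);
 *	memcpy(vto, src, len);
 *	kunmap_coherent();
 */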
void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = num_wired_entries() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
	preempt_enable();
}

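/*
 * Copy a user highpage while respecting dcache aliasing: if the source page
 * may be live in the user's cache colour (it is mapped and not marked dcache
 * dirty), read it through kmap_coherent() so we observe the user's view.
 */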
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapcount(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

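/*
 * Pre-allocate the pmd-level page tables covering the fixed-mapping range
 * [start, end) so individual fixmap/kmap PTEs can be installed later.  Only
 * needed with CONFIG_HIGHMEM, where the fixmap/kmap window is in use.
 */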
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
									   PAGE_SIZE);
					if (!pte)
						panic("%s: Failed to allocate %lu bytes align=%lx\n",
						      __func__, PAGE_SIZE,
						      PAGE_SIZE);

					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

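/*
 * MAARs (Memory Accessibility Attribute Registers) are configured in pairs,
 * each pair bounding a physical address range.  The walk below builds one
 * config entry per RAM region, marking it eligible for speculative access
 * (MIPS_MAAR_S).
 */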
struct maar_walk_info {
	struct maar_config cfg[16];
	unsigned int num_cfg;
};

static int maar_res_walk(unsigned long start_pfn, unsigned long nr_pages,
			 void *data)
{
	struct maar_walk_info *wi = data;
	struct maar_config *cfg = &wi->cfg[wi->num_cfg];
	unsigned int maar_align;

	/* MAAR registers hold physical addresses right shifted by 4 bits */
	maar_align = BIT(MIPS_MAAR_ADDR_SHIFT + 4);

	/* Fill in the MAAR config entry */
	cfg->lower = ALIGN(PFN_PHYS(start_pfn), maar_align);
	cfg->upper = ALIGN_DOWN(PFN_PHYS(start_pfn + nr_pages), maar_align) - 1;
	cfg->attrs = MIPS_MAAR_S;

	/* Ensure we don't overflow the cfg array */
	if (!WARN_ON(wi->num_cfg >= ARRAY_SIZE(wi->cfg)))
		wi->num_cfg++;

	return 0;
}

unsigned __weak platform_maar_init(unsigned num_pairs)
{
	unsigned int num_configured;
	struct maar_walk_info wi;

	wi.num_cfg = 0;
	walk_system_ram_range(0, max_pfn, &wi, maar_res_walk);

	num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs);
	if (num_configured < wi.num_cfg)
		pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
			num_pairs, wi.num_cfg);

	return num_configured;
}

void maar_init(void)
{
	unsigned num_maars, used, i;
	phys_addr_t lower, upper, attr;
	static struct {
		struct maar_config cfgs[3];
		unsigned used;
	} recorded = { { { 0 } }, 0 };

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should be in pairs */
	WARN_ON(num_maars % 2);

	/* Set MAARs using values we recorded already */
	if (recorded.used) {
		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
		BUG_ON(used != recorded.used);
	} else {
		/* Configure the required MAARs */
		used = platform_maar_init(num_maars / 2);
	}

	/* Disable any further MAARs */
	for (i = (used * 2); i < num_maars; i++) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		write_c0_maar(0);
		back_to_back_c0_hazard();
	}

	if (recorded.used)
		return;

	pr_info("MAAR configuration:\n");
	for (i = 0; i < num_maars; i += 2) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		upper = read_c0_maar();

		write_c0_maari(i + 1);
		back_to_back_c0_hazard();
		lower = read_c0_maar();

		attr = lower & upper;
		lower = (lower & MIPS_MAAR_ADDR) << 4;
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info("  [%d]: ", i / 2);
		if (!(attr & MIPS_MAAR_VL)) {
			pr_cont("disabled\n");
			continue;
		}

		pr_cont("%pa-%pa", &lower, &upper);

		if (attr & MIPS_MAAR_S)
			pr_cont(" speculate");

		pr_cont("\n");

		/* Record the setup for use on secondary CPUs */
		if (used <= ARRAY_SIZE(recorded.cfgs)) {
			recorded.cfgs[recorded.used].lower = lower;
			recorded.cfgs[recorded.used].upper = upper;
			recorded.cfgs[recorded.used].attrs = attr;
			recorded.used++;
		}
	}
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

static inline void __init mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	if (cpu_has_dc_aliases)
		return;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!memblock_is_memory(PFN_PHYS(tmp)))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

void __init mem_init(void)
{
	/*
	 * When _PFN_SHIFT is greater than PAGE_SHIFT we won't have enough PTE
	 * bits to hold a full 32b physical address on MIPS32 systems.
	 */
	BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));

#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	maar_init();
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages.  */
	mem_init_free_highmem();
	mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

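/*
 * Poison and hand back to the page allocator every page fully contained in
 * [begin, end), given as physical addresses.  Used for init sections.
 */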
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __ref free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

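/*
 * NUMA-aware percpu first-chunk setup: embed each CPU's percpu area in
 * node-local memory via pcpu_embed_first_chunk(), using the callbacks below
 * for inter-CPU distance, allocation and freeing.
 */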
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return node_distance(cpu_to_node(from), cpu_to_node(to));
}

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
				   size_t align)
{
	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE,
				      cpu_to_node(cpu));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
#endif

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * Align swapper_pg_dir to 64K, which allows its address to be loaded with a
 * single LUI instruction in the TLB handlers.  If we used __aligned(64K),
 * its size would get rounded up to the alignment size and waste space.  So
 * we place it in its own section and align it in the linker script.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);