/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
#include <linux/initrd.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price. Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

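/*
 * Map a page into the fixmap region at a virtual address whose cache
 * colour matches the user-space address it aliases, by hand-loading a
 * wired TLB entry. Interrupt context gets its own set of colour slots
 * so a mapping in progress can't be clobbered by a nested one.
 */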
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	preempt_disable();
	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
	entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_XPA
	if (cpu_has_xpa) {
		entrylo = (pte.pte_low & _PFNX_MASK);
		writex_c0_entrylo0(entrylo);
		writex_c0_entrylo1(entrylo);
	}
#endif
	tlbidx = num_wired_entries();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);

	return (void *)vaddr;
}

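/*
 * kmap_coherent() maps @page at the cache colour of @addr so that
 * kernel accesses hit the same cache lines as the user mapping; it
 * must be balanced by kunmap_coherent(). kmap_noncoherent() does the
 * same but with the noncoherent cache attribute (PAGE_KERNEL_NC).
 */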
void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

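/*
 * Undo kmap_coherent()/kmap_noncoherent(): shrink the wired count by
 * one and overwrite the slot with an invalid entry whose ENTRYHI is
 * unique, so it can never match a real address.
 */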
void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = num_wired_entries() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
	preempt_enable();
}

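/*
 * Copy a user page. If the source may be live in a differently
 * coloured user mapping (aliasing D-cache, page mapped, D-cache view
 * not yet marked dirty), read it through kmap_coherent() so we see
 * the user's view of the data; otherwise a plain kmap_atomic() copy
 * suffices.
 */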
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapcount(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

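/*
 * Write into a page that is also mapped in user space (e.g. for
 * ptrace). Use the coherent map when a clean user mapping exists;
 * otherwise write through the kernel address and mark the D-cache
 * view stale. Executable mappings additionally need the I-cache
 * brought up to date with the new contents.
 */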
void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

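/*
 * Read from a page that is also mapped in user space: the mirror image
 * of copy_to_user_page(), reading through the coherent mapping when
 * the user's cached view may differ from the kernel's.
 */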
void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapcount(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

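/*
 * Pre-allocate the page tables covering the fixmap range so that
 * individual fixmap entries can later be installed with nothing more
 * than a PTE write. Only needed when CONFIG_HIGHMEM pulls in kmap.
 */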
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

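/*
 * Default, board-overridable (__weak) MAAR setup: create one
 * speculation-enabled region per usable bootmem range, rounding each
 * region inward to the 64 KiB granularity of the MAAR address fields.
 */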
unsigned __weak platform_maar_init(unsigned num_pairs)
{
	struct maar_config cfg[BOOT_MEM_MAP_MAX];
	unsigned i, num_configured, num_cfg = 0;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			continue;
		}

		/* Round lower up */
		cfg[num_cfg].lower = boot_mem_map.map[i].addr;
		cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;

		/* Round upper down */
		cfg[num_cfg].upper = boot_mem_map.map[i].addr +
					boot_mem_map.map[i].size;
		cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;

		cfg[num_cfg].attrs = MIPS_MAAR_S;
		num_cfg++;
	}

	num_configured = maar_config(cfg, num_cfg, num_pairs);
	if (num_configured < num_cfg)
		pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
			num_pairs, num_cfg);

	return num_configured;
}

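/*
 * Program the Memory Accessibility Attribute Registers. The boot CPU
 * probes how many MAARs exist and lets the platform configure them,
 * then records the result so secondary CPUs can replay exactly the
 * same configuration when they come online.
 */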
void maar_init(void)
{
	unsigned num_maars, used, i;
	phys_addr_t lower, upper, attr;
	static struct {
		struct maar_config cfgs[3];
		unsigned used;
	} recorded = { { { 0 } }, 0 };

	if (!cpu_has_maar)
		return;

	/* Detect the number of MAARs */
	write_c0_maari(~0);
	back_to_back_c0_hazard();
	num_maars = read_c0_maari() + 1;

	/* MAARs should be in pairs */
	WARN_ON(num_maars % 2);

	/* Set MAARs using values we recorded already */
	if (recorded.used) {
		used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
		BUG_ON(used != recorded.used);
	} else {
		/* Configure the required MAARs */
		used = platform_maar_init(num_maars / 2);
	}

	/* Disable any further MAARs */
	for (i = (used * 2); i < num_maars; i++) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		write_c0_maar(0);
		back_to_back_c0_hazard();
	}

	if (recorded.used)
		return;

	pr_info("MAAR configuration:\n");
	for (i = 0; i < num_maars; i += 2) {
		write_c0_maari(i);
		back_to_back_c0_hazard();
		upper = read_c0_maar();

		write_c0_maari(i + 1);
		back_to_back_c0_hazard();
		lower = read_c0_maar();

		attr = lower & upper;
		lower = (lower & MIPS_MAAR_ADDR) << 4;
		upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

		pr_info("  [%d]: ", i / 2);
		if (!(attr & MIPS_MAAR_VL)) {
			pr_cont("disabled\n");
			continue;
		}

		pr_cont("%pa-%pa", &lower, &upper);

		if (attr & MIPS_MAAR_S)
			pr_cont(" speculate");

		pr_cont("\n");

		/* Record the setup for use on secondary CPUs */
		if (used <= ARRAY_SIZE(recorded.cfgs)) {
			recorded.cfgs[recorded.used].lower = lower;
			recorded.cfgs[recorded.used].upper = upper;
			recorded.cfgs[recorded.used].attrs = attr;
			recorded.used++;
		}
	}
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
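/*
 * Report whether the page frame lies within a usable (RAM or init-RAM)
 * region of the boot memory map.
 */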
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			/* not usable memory */
			continue;
		}

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

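/*
 * Build the kernel page tables and hand the zone sizes to the core
 * allocator. Highmem is ignored on CPUs with D-cache aliases, since a
 * kmap'ed highmem page couldn't be given the right cache colour.
 */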
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

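/*
 * Release every usable highmem page to the buddy allocator. Skipped
 * on CPUs with D-cache aliases, where highmem is left disabled.
 */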
static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	if (cpu_has_dc_aliases)
		return;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

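/*
 * Late boot memory setup: configure the MAARs, release bootmem to the
 * buddy allocator, allocate the coloured zero pages and, on 64-bit
 * kernels, expose the KSEG0 text mapping via /proc/kcore.
 */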
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	maar_init();
	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages. */
	mem_init_free_highmem();
	mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow. */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

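/*
 * Poison a physical range of init memory and return its pages to the
 * allocator.
 */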
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __ref free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants. So we use the variants from asm-offsets.h until that gcc
 * will officially be retired.
 *
 * Align swapper_pg_dir to 64K, which allows its address to be loaded
 * with a single LUI instruction in the TLB handlers. If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space. So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);