/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>

#include "mm.h"
#include "tcm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;
pgprot_t pgprot_hyp_device;
pgprot_t pgprot_s2;
pgprot_t pgprot_s2_device;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
	pteval_t	pte_s2;
};

#ifdef CONFIG_ARM_LPAE
#define s2_policy(policy)	policy
#else
#define s2_policy(policy)	0
#endif
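
/*
 * Note (context, not from this file): the "hyp" and "s2" (stage-2)
 * protections above are consumed by the KVM/ARM virtualization code.
 * Stage-2 translation only exists with LPAE, which is why s2_policy()
 * collapses to 0 on classic page tables.
 */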

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITETHROUGH),
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
	}
};

#ifdef CONFIG_CPU_CP15
/*
 * These are useful for identifying cache coherency problems by allowing
 * the cache, or the cache and the write buffer, to be turned off.
 * (Note: the write buffer should not be enabled while the cache is off.)
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We cannot
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);
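
/*
 * Example (illustrative): booting with "cachepolicy=writethrough" on the
 * kernel command line selects the write-through entry in cache_policies[]
 * on pre-ARMv6 CPUs; on ARMv6 and later only write-back is accepted.
 */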

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

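/*
 * Note (assumption): adjust_cr() below keeps the cached cr_alignment and
 * cr_no_alignment values in sync with the live CP15 control register for
 * callers such as the alignment fault handling code.  It is provided on
 * UP only, where updating the single control register here is sufficient.
 */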
#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#else	/* ifdef CONFIG_CPU_CP15 */

static int __init early_cachepolicy(char *p)
{
	pr_warning("cachepolicy kernel parameter not supported without cp15\n");
	return 0;	/* was missing: the function is declared int */
}
early_param("cachepolicy", early_cachepolicy);

static int __init noalign_setup(char *__unused)
{
	pr_warning("noalign kernel parameter not supported without cp15\n");
	return 1;	/* was missing: __setup handlers return 1 when consumed */
}
__setup("noalign", noalign_setup);

#endif	/* ifdef CONFIG_CPU_CP15 / else */

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED | L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DMA_READY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
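
/*
 * Illustrative use (a sketch, not taken from this file): ioremap-style
 * callers can look up the attributes for a mapping class, e.g.
 *
 *	const struct mem_type *t = get_mem_type(MT_DEVICE_WC);
 *	if (t)
 *		prot = __pgprot(t->prot_pte);
 */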

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
	s2_pgprot = cp->pte_s2;
	hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			s2_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	for (i = 0; i < 16; i++) {
		pteval_t v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);
	pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
	pgprot_s2_device  = __pgprot(s2_device_pgprot);
	pgprot_hyp_device  = __pgprot(hyp_device_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif
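
/*
 * Note: phys_mem_access_prot() is the hook consulted when userspace
 * mmap()s physical memory (e.g. via /dev/mem): non-RAM pfns become
 * strongly ordered, O_SYNC requests get write-combining, and plain RAM
 * keeps the attributes it was handed.
 */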

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}
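
/*
 * Note: these helpers are only usable before the core page allocator is
 * up.  memblock_alloc() hands back lowmem that the initial page tables
 * already map, which is why converting the physical address with __va()
 * and memset()ing it here is safe.
 */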

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}
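
/*
 * The PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE sizing reflects the classic
 * 2-level layout, where each table page carries the "Linux" PTE array
 * followed by the hardware PTE tables (see
 * arch/arm/include/asm/pgtable-2level.h); LPAE needs no shadow copy.
 */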

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
			unsigned long end, phys_addr_t phys,
			const struct mem_type *type)
{
	pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
	/*
	 * In classic MMU format, puds and pmds are folded into
	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
	 * group of L1 entries making up one logical pointer to
	 * an L2 table (2MB), whereas PMDs refer to the individual
	 * L1 entries (1MB). Hence increment to get the correct
	 * offset for odd 1MB sections.
	 * (See arch/arm/include/asm/pgtable-2level.h)
	 */
	if (addr & SECTION_SIZE)
		pmd++;
#endif
	do {
		*pmd = __pmd(phys | type->prot_sect);
		phys += SECTION_SIZE;
	} while (pmd++, addr += SECTION_SIZE, addr != end);

	flush_pmd_entry(p);
}
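
/*
 * Worked example (classic MMU, illustrative): SECTION_SIZE is 1MB while a
 * Linux PMD covers 2MB, so mapping virtual 0x40100000 has bit 20 set and
 * the pmd++ above steps to the second (odd) hardware L1 entry of the pair.
 */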

static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		/*
		 * With LPAE, we must loop over to map
		 * all the pmds for the given range.
		 */
		next = pmd_addr_end(addr, end);

		/*
		 * Try a section mapping - addr, next and phys must all be
		 * aligned to a section boundary.
		 */
		if (type->prot_sect &&
				((addr | next | phys) & ~SECTION_MASK) == 0) {
			__map_init_section(pmd, addr, next, phys, type);
		} else {
			alloc_init_pte(pmd, addr, next,
						__phys_to_pfn(phys), type);
		}

		phys += next - addr;

	} while (pmd++, addr = next, addr != end);
}

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
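
/*
 * Worked example (illustrative): with PAGE_SHIFT == 12, a pfn of 0x140000
 * is physical 0x1_40000000; md->pfn >> 20 yields 0x1 for bits [35:32],
 * which the code above folds into PMD bits [23:20] of each entry.
 */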
#endif	/* !CONFIG_ARM_LPAE */

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;
	struct static_vm *svm;

	if (!nr)
		return;

	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);

		vm = &svm->vm;
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		add_static_vm_early(svm++);
	}
}

void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
				  void *caller)
{
	struct vm_struct *vm;
	struct static_vm *svm;

	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));

	vm = &svm->vm;
	vm->addr = (void *)addr;
	vm->size = size;
	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
	vm->caller = caller;
	add_static_vm_early(svm);
}

#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */

static void __init pmd_empty_section_gap(unsigned long addr)
{
	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
}

static void __init fill_pmd_gaps(void)
{
	struct static_vm *svm;
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}

#else
#define fill_pmd_gaps() do { } while (0)
#endif

#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
static void __init pci_reserve_io(void)
{
	struct static_vm *svm;

	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
	if (svm)
		return;

	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
}
#else
#define pci_reserve_io() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LL
void __init debug_ll_io_init(void)
{
	struct map_desc map;

	debug_ll_addr(&map.pfn, &map.virtual);
	if (!map.pfn || !map.virtual)
		return;
	map.pfn = __phys_to_pfn(map.pfn);
	map.virtual &= PAGE_MASK;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE;
	iotable_init(&map, 1);
}
#endif

static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240MB.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);
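
/*
 * Example (illustrative): booting with "vmalloc=512M" moves vmalloc_min
 * down so 512MB of virtual space sits below VMALLOC_END, shrinking the
 * directly mapped lowmem accordingly; the checks above keep at least
 * 16MB of vmalloc space and leave at least 32MB of lowmem.
 */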

phys_addr_t arm_lowmem_limit __initdata = 0;

void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;
	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		phys_addr_t size_limit;

		*bank = meminfo.bank[i];
		size_limit = bank->size;

		if (bank->start >= vmalloc_limit)
			highmem = 1;
		else
			size_limit = vmalloc_limit - bank->start;

		bank->highmem = highmem;

#ifdef CONFIG_HIGHMEM
		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (!highmem && bank->size > size_limit) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= size_limit;
				bank[1].start = vmalloc_limit;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = size_limit;
		}
#else
		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(!CONFIG_HIGHMEM).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (bank->size > size_limit) {
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + size_limit - 1);
			bank->size = size_limit;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
			arm_lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(arm_lowmem_limit - 1) + 1;
	memblock_set_current_limit(arm_lowmem_limit);
}
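
/*
 * Worked example (illustrative numbers): with vmalloc_limit at physical
 * 0xa0000000, a 512MB bank starting at 0x90000000 is split into a 256MB
 * lowmem bank and a 256MB highmem bank beginning at the limit; a bank
 * starting at or above the limit is marked highmem outright.
 */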

static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= arm_lowmem_limit)
		end = arm_lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif
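
/*
 * Size check (illustrative): classic ARM has PTRS_PER_PGD == 2048 with an
 * 8-byte pgd_t, giving the familiar 16KB swapper_pg_dir; LPAE reserves one
 * 4KB page for the 4-entry pgd plus four page-sized pmd tables (20KB).
 */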

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	early_trap_init(vectors);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();
	else
		debug_ll_io_init();
	fill_pmd_gaps();

	/* Reserve fixed i/o space in VMALLOC region */
	pci_reserve_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

static void __init map_lowmem(void)
{
	struct memblock_region *reg;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map);
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	memblock_set_current_limit(arm_lowmem_limit);

	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	dma_contiguous_remap();
	devicemaps_init(mdesc);
	kmap_init();
	tcm_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}