/*
 * linux/arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
#include <asm/procinfo.h>
#include <asm/memory.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>

#include "mm.h"
#include "tcm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;
pgprot_t pgprot_hyp_device;
pgprot_t pgprot_s2;
pgprot_t pgprot_s2_device;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
	pteval_t	pte_s2;
};

#ifdef CONFIG_ARM_LPAE
#define s2_policy(policy)	policy
#else
#define s2_policy(policy)	0
#endif

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITETHROUGH),
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
	}
};

#ifdef CONFIG_CPU_CP15
static unsigned long initial_pmd_value __initdata = 0;

/*
 * Initialise the cache_policy variable with the initial state specified
 * via the "pmd" value.  This is used to ensure that on ARMv6 and later,
 * the C code sets the page tables up with the same policy as the head
 * assembly code, which avoids an illegal state where the TLBs can get
 * confused.  See comments in early_cachepolicy() for more information.
 */
void __init init_default_cache_policy(unsigned long pmd)
{
	int i;

	initial_pmd_value = pmd;

	pmd &= PMD_SECT_TEX(1) | PMD_SECT_BUFFERABLE | PMD_SECT_CACHEABLE;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++)
		if (cache_policies[i].pmd == pmd) {
			cachepolicy = i;
			break;
		}

	if (i == ARRAY_SIZE(cache_policies))
		pr_err("ERROR: could not find cache policy\n");
}

/*
 * These are useful for identifying cache coherency problems by allowing
 * the cache or the cache and writebuffer to be turned off.  (Note: the
 * write buffer should not be on with the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i, selected = -1;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			selected = i;
			break;
		}
	}

	if (selected == -1)
		pr_err("ERROR: unknown or unsupported cache policy\n");

	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We cannot
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6 && selected != cachepolicy) {
		pr_warn("Only cachepolicy=%s supported on ARMv6 and later\n",
			cache_policies[cachepolicy].policy);
		return 0;
	}

	if (selected != cachepolicy) {
		unsigned long cr = __clear_cr(cache_policies[selected].cr_mask);
		cachepolicy = selected;
		flush_cache_all();
		set_cr(cr);
	}
	return 0;
}
early_param("cachepolicy", early_cachepolicy);
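
/*
 * For example (illustrative only): booting with "cachepolicy=writethrough"
 * on the kernel command line selects the "writethrough" entry of
 * cache_policies[] above; any of the .policy names in that table may be
 * given as the parameter value.
 */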
Russell Kingae8f1542006-09-27 15:38:34 +0100190
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100191static int __init early_nocache(char *__unused)
Russell Kingae8f1542006-09-27 15:38:34 +0100192{
193 char *p = "buffered";
194 printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100195 early_cachepolicy(p);
196 return 0;
Russell Kingae8f1542006-09-27 15:38:34 +0100197}
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100198early_param("nocache", early_nocache);
Russell Kingae8f1542006-09-27 15:38:34 +0100199
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100200static int __init early_nowrite(char *__unused)
Russell Kingae8f1542006-09-27 15:38:34 +0100201{
202 char *p = "uncached";
203 printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100204 early_cachepolicy(p);
205 return 0;
Russell Kingae8f1542006-09-27 15:38:34 +0100206}
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100207early_param("nowb", early_nowrite);
Russell Kingae8f1542006-09-27 15:38:34 +0100208
Catalin Marinas1b6ba462011-11-22 17:30:29 +0000209#ifndef CONFIG_ARM_LPAE
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100210static int __init early_ecc(char *p)
Russell Kingae8f1542006-09-27 15:38:34 +0100211{
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100212 if (memcmp(p, "on", 2) == 0)
Russell Kingae8f1542006-09-27 15:38:34 +0100213 ecc_mask = PMD_PROTECTION;
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100214 else if (memcmp(p, "off", 3) == 0)
Russell Kingae8f1542006-09-27 15:38:34 +0100215 ecc_mask = 0;
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100216 return 0;
Russell Kingae8f1542006-09-27 15:38:34 +0100217}
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100218early_param("ecc", early_ecc);
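
/*
 * Illustrative note: "ecc=on" sets PMD_PROTECTION in ecc_mask, which is
 * later ORed into the level-1 descriptors in build_mem_type_table();
 * "ecc=off" (the default) leaves ecc_mask clear.  The option only exists
 * for the classic (non-LPAE) page table format.
 */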
#endif

#else	/* ifdef CONFIG_CPU_CP15 */

static int __init early_cachepolicy(char *p)
{
	pr_warning("cachepolicy kernel parameter not supported without cp15\n");
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init noalign_setup(char *__unused)
{
	pr_warning("noalign kernel parameter not supported without cp15\n");
	return 1;
}
__setup("noalign", noalign_setup);

#endif	/* ifdef CONFIG_CPU_CP15 / else */

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_PTE_S2_DEVICE	PROT_PTE_DEVICE
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_pte_s2	= s2_policy(PROT_PTE_S2_DEVICE) |
				  s2_policy(L_PTE_S2_MT_DEV_SHARED) |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY_RWX] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
			     L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RWX_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RWX_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED | L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DMA_READY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
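
/*
 * Illustrative note: callers such as the ioremap code look up an entry
 * with, for example, get_mem_type(MT_DEVICE) and then use the returned
 * prot_pte/prot_sect values when building their mappings; a NULL return
 * means the index was out of range.
 */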

#define PTE_SET_FN(_name, pteop) \
static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
			void *data) \
{ \
	pte_t pte = pteop(*ptep); \
\
	set_pte_ext(ptep, pte, 0); \
	return 0; \
} \

#define SET_MEMORY_FN(_name, callback) \
int set_memory_##_name(unsigned long addr, int numpages) \
{ \
	unsigned long start = addr; \
	unsigned long size = PAGE_SIZE*numpages; \
	unsigned long end = start + size; \
\
	if (start < MODULES_VADDR || start >= MODULES_END) \
		return -EINVAL; \
\
	if (end < MODULES_VADDR || end >= MODULES_END) \
		return -EINVAL; \
\
	apply_to_page_range(&init_mm, start, size, callback, NULL); \
	flush_tlb_kernel_range(start, end); \
	return 0; \
}

PTE_SET_FN(ro, pte_wrprotect)
PTE_SET_FN(rw, pte_mkwrite)
PTE_SET_FN(x, pte_mkexec)
PTE_SET_FN(nx, pte_mknexec)

SET_MEMORY_FN(ro, pte_set_ro)
SET_MEMORY_FN(rw, pte_set_rw)
SET_MEMORY_FN(x, pte_set_x)
SET_MEMORY_FN(nx, pte_set_nx)
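
/*
 * The macros above expand to set_memory_ro/rw/x/nx().  For example
 * (illustrative only), a caller protecting module text could make a page
 * range in the module area read-only and non-executable with:
 *
 *	set_memory_ro(addr, npages);
 *	set_memory_nx(addr, npages);
 *
 * Each call returns 0 on success or -EINVAL if the range falls outside
 * MODULES_VADDR..MODULES_END.
 */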

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

	if (is_smp()) {
		if (cachepolicy != CPOLICY_WRITEALLOC) {
			pr_warn("Forcing write-allocate cache policy for SMP\n");
			cachepolicy = CPOLICY_WRITEALLOC;
		}
		if (!(initial_pmd_value & PMD_SECT_S)) {
			pr_warn("Forcing shared mappings for SMP\n");
			initial_pmd_value |= PMD_SECT_S;
		}
	}

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;

			/* Also setup NX memory mapping */
			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
	s2_pgprot = cp->pte_s2;
	hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
	s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;

	/*
	 * We don't use domains on ARMv6 (since this causes problems with
	 * v6/v7 kernels), so we must use a separate memory type for user
	 * r/o, kernel r/w to map the vectors page.
	 */
#ifndef CONFIG_ARM_LPAE
	if (cpu_arch == CPU_ARCH_ARMv6)
		vecs_pgprot |= L_PTE_MT_VECTORS;
#endif

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		/*
		 * If the initial page tables were created with the S bit
		 * set, then we need to do the same here for the same
		 * reasons given in early_cachepolicy().
		 */
		if (initial_pmd_value & PMD_SECT_S) {
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			s2_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	for (i = 0; i < 16; i++) {
		pteval_t v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);
	pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
	pgprot_s2_device  = __pgprot(s2_device_pgprot);
	pgprot_hyp_device  = __pgprot(hyp_device_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	pr_info("Memory policy: %sData cache %s\n",
		ecc_mask ? "ECC enabled, " : "", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif
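
/*
 * Illustrative note: phys_mem_access_prot() is consulted when user space
 * mmap()s physical memory (e.g. via /dev/mem).  RAM pages keep the
 * requested attributes, non-RAM (!pfn_valid) regions are forced to be
 * non-cacheable, and O_SYNC mappings of RAM become write-combining.
 */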

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
			unsigned long end, phys_addr_t phys,
			const struct mem_type *type)
{
	pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
	/*
	 * In classic MMU format, puds and pmds are folded in to
	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
	 * group of L1 entries making up one logical pointer to
	 * an L2 table (2MB), whereas PMDs refer to the individual
	 * L1 entries (1MB). Hence increment to get the correct
	 * offset for odd 1MB sections.
	 * (See arch/arm/include/asm/pgtable-2level.h)
	 */
	if (addr & SECTION_SIZE)
		pmd++;
#endif
	do {
		*pmd = __pmd(phys | type->prot_sect);
		phys += SECTION_SIZE;
	} while (pmd++, addr += SECTION_SIZE, addr != end);

	flush_pmd_entry(p);
}

static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		/*
		 * With LPAE, we must loop over to map
		 * all the pmds for the given range.
		 */
		next = pmd_addr_end(addr, end);

		/*
		 * Try a section mapping - addr, next and phys must all be
		 * aligned to a section boundary.
		 */
		if (type->prot_sect &&
				((addr | next | phys) & ~SECTION_MASK) == 0) {
			__map_init_section(pmd, addr, next, phys, type);
		} else {
			alloc_init_pte(pmd, addr, next,
						__phys_to_pfn(phys), type);
		}

		phys += next - addr;

	} while (pmd++, addr = next, addr != end);
}

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, phys_addr_t phys,
				  const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;
	struct static_vm *svm;

	if (!nr)
		return;

	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);

		vm = &svm->vm;
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		add_static_vm_early(svm++);
	}
}
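
/*
 * Example (illustrative only): a machine's ->map_io() hook might register
 * a static device mapping like this, assuming a hypothetical UART at
 * physical address 0x10000000 mapped at virtual address 0xf8000000:
 *
 *	static struct map_desc board_io_desc[] __initdata = {
 *		{
 *			.virtual	= 0xf8000000,
 *			.pfn		= __phys_to_pfn(0x10000000),
 *			.length		= SZ_4K,
 *			.type		= MT_DEVICE,
 *		},
 *	};
 *
 *	iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
 */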

void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
				  void *caller)
{
	struct vm_struct *vm;
	struct static_vm *svm;

	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));

	vm = &svm->vm;
	vm->addr = (void *)addr;
	vm->size = size;
	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
	vm->caller = caller;
	add_static_vm_early(svm);
}

#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */

static void __init pmd_empty_section_gap(unsigned long addr)
{
	vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap);
}

static void __init fill_pmd_gaps(void)
{
	struct static_vm *svm;
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}

#else
#define fill_pmd_gaps() do { } while (0)
#endif

#if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
static void __init pci_reserve_io(void)
{
	struct static_vm *svm;

	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
	if (svm)
		return;

	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
}
#else
#define pci_reserve_io() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LL
void __init debug_ll_io_init(void)
{
	struct map_desc map;

	debug_ll_addr(&map.pfn, &map.virtual);
	if (!map.pfn || !map.virtual)
		return;
	map.pfn = __phys_to_pfn(map.pfn);
	map.virtual &= PAGE_MASK;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE;
	iotable_init(&map, 1);
}
#endif

static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);
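
/*
 * For example (illustrative only), booting with "vmalloc=384M" reserves a
 * 384MB vmalloc area.  As implemented above, the value is clamped to at
 * least 16MB and to leave at least 32MB of lowmem above PAGE_OFFSET.
 */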
Russell King6c5da7a2008-09-30 19:31:44 +01001071
Marek Szyprowskic7909502011-12-29 13:09:51 +01001072phys_addr_t arm_lowmem_limit __initdata = 0;
Russell King8df65162010-10-27 19:57:38 +01001073
Russell King0371d3f2011-07-05 19:58:29 +01001074void __init sanity_check_meminfo(void)
Lennert Buytenhek60296c72008-08-05 01:56:13 +02001075{
Russell Kingc65b7e92013-07-17 17:53:04 +01001076 phys_addr_t memblock_limit = 0;
Russell Kingdde58282009-08-15 12:36:00 +01001077 int i, j, highmem = 0;
Cyril Chemparathy82f66702012-07-20 12:01:23 -04001078 phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
Lennert Buytenhek60296c72008-08-05 01:56:13 +02001079
Nicolas Pitre4b5f32c2008-10-06 13:24:40 -04001080 for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
Nicolas Pitrea1bbaec2008-09-02 11:44:21 -04001081 struct membank *bank = &meminfo.bank[j];
Cyril Chemparathy28d4bf72012-07-20 13:16:41 -04001082 phys_addr_t size_limit;
1083
Nicolas Pitrea1bbaec2008-09-02 11:44:21 -04001084 *bank = meminfo.bank[i];
Cyril Chemparathy28d4bf72012-07-20 13:16:41 -04001085 size_limit = bank->size;
Nicolas Pitrea1bbaec2008-09-02 11:44:21 -04001086
Cyril Chemparathy82f66702012-07-20 12:01:23 -04001087 if (bank->start >= vmalloc_limit)
Will Deacon77f73a22011-11-22 17:30:32 +00001088 highmem = 1;
Cyril Chemparathy28d4bf72012-07-20 13:16:41 -04001089 else
1090 size_limit = vmalloc_limit - bank->start;
Russell Kingdde58282009-08-15 12:36:00 +01001091
1092 bank->highmem = highmem;
1093
Cyril Chemparathyadf2e9f2012-07-20 12:24:45 -04001094#ifdef CONFIG_HIGHMEM
Nicolas Pitrea1bbaec2008-09-02 11:44:21 -04001095 /*
1096 * Split those memory banks which are partially overlapping
1097 * the vmalloc area greatly simplifying things later.
1098 */
Cyril Chemparathy28d4bf72012-07-20 13:16:41 -04001099 if (!highmem && bank->size > size_limit) {
Nicolas Pitrea1bbaec2008-09-02 11:44:21 -04001100 if (meminfo.nr_banks >= NR_BANKS) {
1101 printk(KERN_CRIT "NR_BANKS too low, "
1102 "ignoring high memory\n");
1103 } else {
1104 memmove(bank + 1, bank,
1105 (meminfo.nr_banks - i) * sizeof(*bank));
1106 meminfo.nr_banks++;
1107 i++;
Cyril Chemparathy28d4bf72012-07-20 13:16:41 -04001108 bank[1].size -= size_limit;
Cyril Chemparathy82f66702012-07-20 12:01:23 -04001109 bank[1].start = vmalloc_limit;
Russell Kingdde58282009-08-15 12:36:00 +01001110 bank[1].highmem = highmem = 1;
Nicolas Pitrea1bbaec2008-09-02 11:44:21 -04001111 j++;
1112 }
Cyril Chemparathy28d4bf72012-07-20 13:16:41 -04001113 bank->size = size_limit;
Nicolas Pitrea1bbaec2008-09-02 11:44:21 -04001114 }
1115#else
1116 /*
Will Deacon77f73a22011-11-22 17:30:32 +00001117 * Highmem banks not allowed with !CONFIG_HIGHMEM.
1118 */
1119 if (highmem) {
1120 printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
1121 "(!CONFIG_HIGHMEM).\n",
1122 (unsigned long long)bank->start,
1123 (unsigned long long)bank->start + bank->size - 1);
1124 continue;
1125 }
1126
1127 /*
Nicolas Pitrea1bbaec2008-09-02 11:44:21 -04001128 * Check whether this memory bank would partially overlap
1129 * the vmalloc area.
1130 */
Cyril Chemparathy28d4bf72012-07-20 13:16:41 -04001131 if (bank->size > size_limit) {
Russell Kinge33b9d02011-02-20 11:47:41 +00001132 printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
1133 "to -%.8llx (vmalloc region overlap).\n",
1134 (unsigned long long)bank->start,
1135 (unsigned long long)bank->start + bank->size - 1,
Cyril Chemparathy28d4bf72012-07-20 13:16:41 -04001136 (unsigned long long)bank->start + size_limit - 1);
1137 bank->size = size_limit;
Nicolas Pitrea1bbaec2008-09-02 11:44:21 -04001138 }
1139#endif
Russell Kingc65b7e92013-07-17 17:53:04 +01001140 if (!bank->highmem) {
1141 phys_addr_t bank_end = bank->start + bank->size;
Will Deacon40f7bfe2011-05-19 13:22:48 +01001142
Russell Kingc65b7e92013-07-17 17:53:04 +01001143 if (bank_end > arm_lowmem_limit)
1144 arm_lowmem_limit = bank_end;
1145
1146 /*
1147 * Find the first non-section-aligned page, and point
1148 * memblock_limit at it. This relies on rounding the
1149 * limit down to be section-aligned, which happens at
1150 * the end of this function.
1151 *
1152 * With this algorithm, the start or end of almost any
1153 * bank can be non-section-aligned. The only exception
1154 * is that the start of the bank 0 must be section-
1155 * aligned, since otherwise memory would need to be
1156 * allocated when mapping the start of bank 0, which
1157 * occurs before any free memory is mapped.
1158 */
1159 if (!memblock_limit) {
1160 if (!IS_ALIGNED(bank->start, SECTION_SIZE))
1161 memblock_limit = bank->start;
1162 else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
1163 memblock_limit = bank_end;
1164 }
1165 }
Nicolas Pitrea1bbaec2008-09-02 11:44:21 -04001166 j++;
Lennert Buytenhek60296c72008-08-05 01:56:13 +02001167 }
Russell Kinge616c592009-09-27 20:55:43 +01001168#ifdef CONFIG_HIGHMEM
1169 if (highmem) {
1170 const char *reason = NULL;
1171
1172 if (cache_is_vipt_aliasing()) {
1173 /*
1174 * Interactions between kmap and other mappings
1175 * make highmem support with aliasing VIPT caches
1176 * rather difficult.
1177 */
1178 reason = "with VIPT aliasing cache";
Russell Kinge616c592009-09-27 20:55:43 +01001179 }
1180 if (reason) {
1181 printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
1182 reason);
1183 while (j > 0 && meminfo.bank[j - 1].highmem)
1184 j--;
1185 }
1186 }
1187#endif
Nicolas Pitre4b5f32c2008-10-06 13:24:40 -04001188 meminfo.nr_banks = j;
Marek Szyprowskic7909502011-12-29 13:09:51 +01001189 high_memory = __va(arm_lowmem_limit - 1) + 1;
Russell Kingc65b7e92013-07-17 17:53:04 +01001190
1191 /*
1192 * Round the memblock limit down to a section size. This
1193 * helps to ensure that we will allocate memory from the
1194 * last full section, which should be mapped.
1195 */
1196 if (memblock_limit)
1197 memblock_limit = round_down(memblock_limit, SECTION_SIZE);
1198 if (!memblock_limit)
1199 memblock_limit = arm_lowmem_limit;
1200
1201 memblock_set_current_limit(memblock_limit);
Lennert Buytenhek60296c72008-08-05 01:56:13 +02001202}
1203
Nicolas Pitre4b5f32c2008-10-06 13:24:40 -04001204static inline void prepare_page_table(void)
Russell Kingd111e8f2006-09-27 15:27:33 +01001205{
1206 unsigned long addr;
Russell King8df65162010-10-27 19:57:38 +01001207 phys_addr_t end;
Russell Kingd111e8f2006-09-27 15:27:33 +01001208
1209 /*
1210 * Clear out all the mappings below the kernel image.
1211 */
Catalin Marinase73fc882011-08-23 14:07:23 +01001212 for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
Russell Kingd111e8f2006-09-27 15:27:33 +01001213 pmd_clear(pmd_off_k(addr));
1214
1215#ifdef CONFIG_XIP_KERNEL
1216 /* The XIP kernel is mapped in the module area -- skip over it */
Catalin Marinase73fc882011-08-23 14:07:23 +01001217 addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
Russell Kingd111e8f2006-09-27 15:27:33 +01001218#endif
Catalin Marinase73fc882011-08-23 14:07:23 +01001219 for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
Russell Kingd111e8f2006-09-27 15:27:33 +01001220 pmd_clear(pmd_off_k(addr));
1221
1222 /*
Russell King8df65162010-10-27 19:57:38 +01001223 * Find the end of the first block of lowmem.
1224 */
1225 end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
Marek Szyprowskic7909502011-12-29 13:09:51 +01001226 if (end >= arm_lowmem_limit)
1227 end = arm_lowmem_limit;
Russell King8df65162010-10-27 19:57:38 +01001228
1229 /*
Russell Kingd111e8f2006-09-27 15:27:33 +01001230 * Clear out all the kernel space mappings, except for the first
Nicolas Pitre0536bdf2011-08-25 00:35:59 -04001231 * memory bank, up to the vmalloc region.
Russell Kingd111e8f2006-09-27 15:27:33 +01001232 */
Russell King8df65162010-10-27 19:57:38 +01001233 for (addr = __phys_to_virt(end);
Nicolas Pitre0536bdf2011-08-25 00:35:59 -04001234 addr < VMALLOC_START; addr += PMD_SIZE)
Russell Kingd111e8f2006-09-27 15:27:33 +01001235 pmd_clear(pmd_off_k(addr));
1236}
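/*
 * A worked example for prepare_page_table(), using typical (not
 * guaranteed) values: with a 3G/1G split (PAGE_OFFSET 0xc0000000,
 * MODULES_VADDR 0xbf000000) and 512MiB of lowmem starting at PHYS_OFFSET,
 * the loops clear 0x00000000-0xbf000000, then 0xbf000000-0xc0000000
 * (unless XIP), and finally the gap between __phys_to_virt(end)
 * (0xe0000000 here) and VMALLOC_START.
 */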
1237
Catalin Marinas1b6ba462011-11-22 17:30:29 +00001238#ifdef CONFIG_ARM_LPAE
1239/* the first page is reserved for pgd */
1240#define SWAPPER_PG_DIR_SIZE (PAGE_SIZE + \
1241 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
1242#else
Catalin Marinase73fc882011-08-23 14:07:23 +01001243#define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
Catalin Marinas1b6ba462011-11-22 17:30:29 +00001244#endif
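/*
 * Assuming 4KiB pages: the LPAE case reserves one page for the pgd plus
 * 4 * 512 * 8 bytes (16KiB) of PMD tables, 20KiB in total; the classic
 * 2-level case reserves the usual 16KiB swapper_pg_dir (2048 pgd entries
 * of 8 bytes each).
 */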
Catalin Marinase73fc882011-08-23 14:07:23 +01001245
Russell Kingd111e8f2006-09-27 15:27:33 +01001246/*
Russell King2778f622010-07-09 16:27:52 +01001247 * Reserve the special regions of memory
Russell Kingd111e8f2006-09-27 15:27:33 +01001248 */
Russell King2778f622010-07-09 16:27:52 +01001249void __init arm_mm_memblock_reserve(void)
Russell Kingd111e8f2006-09-27 15:27:33 +01001250{
Russell Kingd111e8f2006-09-27 15:27:33 +01001251 /*
Russell Kingd111e8f2006-09-27 15:27:33 +01001252 * Reserve the page tables. These are already in use,
1253 * and can only be in node 0.
1254 */
Catalin Marinase73fc882011-08-23 14:07:23 +01001255 memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
Russell Kingd111e8f2006-09-27 15:27:33 +01001256
Russell Kingd111e8f2006-09-27 15:27:33 +01001257#ifdef CONFIG_SA1111
1258 /*
1259 * Because of the SA1111 DMA bug, we want to preserve our
1260 * precious DMA-able memory...
1261 */
Russell King2778f622010-07-09 16:27:52 +01001262 memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
Russell Kingd111e8f2006-09-27 15:27:33 +01001263#endif
Russell Kingd111e8f2006-09-27 15:27:33 +01001264}
1265
1266/*
Nicolas Pitre0536bdf2011-08-25 00:35:59 -04001267 * Set up the device mappings. Since we clear out the page tables for all
1268 * mappings above VMALLOC_START, we will remove any debug device mappings.
Russell Kingd111e8f2006-09-27 15:27:33 +01001269 * Be careful how you debug this function, or any function it calls:
 1270 * you cannot use any function or debugging method which may touch a
 1271 * device, otherwise the kernel _will_ crash.
1272 */
Russell Kingff69a4c2013-07-26 14:55:59 +01001273static void __init devicemaps_init(const struct machine_desc *mdesc)
Russell Kingd111e8f2006-09-27 15:27:33 +01001274{
1275 struct map_desc map;
1276 unsigned long addr;
Russell King94e5a852012-01-18 15:32:49 +00001277 void *vectors;
Russell Kingd111e8f2006-09-27 15:27:33 +01001278
1279 /*
1280 * Allocate the vector page early.
1281 */
Russell King19accfd2013-07-04 11:40:32 +01001282 vectors = early_alloc(PAGE_SIZE * 2);
Russell King94e5a852012-01-18 15:32:49 +00001283
1284 early_trap_init(vectors);
Russell Kingd111e8f2006-09-27 15:27:33 +01001285
Nicolas Pitre0536bdf2011-08-25 00:35:59 -04001286 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
Russell Kingd111e8f2006-09-27 15:27:33 +01001287 pmd_clear(pmd_off_k(addr));
1288
1289 /*
1290 * Map the kernel if it is XIP.
 1291	 * It is always first in the module area.
1292 */
1293#ifdef CONFIG_XIP_KERNEL
1294 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
Russell Kingab4f2ee2008-11-06 17:11:07 +00001295 map.virtual = MODULES_VADDR;
Russell King37efe642008-12-01 11:53:07 +00001296 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
Russell Kingd111e8f2006-09-27 15:27:33 +01001297 map.type = MT_ROM;
1298 create_mapping(&map);
1299#endif
1300
1301 /*
1302 * Map the cache flushing regions.
1303 */
1304#ifdef FLUSH_BASE
1305 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
1306 map.virtual = FLUSH_BASE;
1307 map.length = SZ_1M;
1308 map.type = MT_CACHECLEAN;
1309 create_mapping(&map);
1310#endif
1311#ifdef FLUSH_BASE_MINICACHE
1312 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
1313 map.virtual = FLUSH_BASE_MINICACHE;
1314 map.length = SZ_1M;
1315 map.type = MT_MINICLEAN;
1316 create_mapping(&map);
1317#endif
1318
1319 /*
1320 * Create a mapping for the machine vectors at the high-vectors
1321 * location (0xffff0000). If we aren't using high-vectors, also
1322 * create a mapping at the low-vectors virtual address.
1323 */
Russell King94e5a852012-01-18 15:32:49 +00001324 map.pfn = __phys_to_pfn(virt_to_phys(vectors));
Russell Kingd111e8f2006-09-27 15:27:33 +01001325 map.virtual = 0xffff0000;
1326 map.length = PAGE_SIZE;
Russell Kinga5463cd2013-07-31 21:58:56 +01001327#ifdef CONFIG_KUSER_HELPERS
Russell Kingd111e8f2006-09-27 15:27:33 +01001328 map.type = MT_HIGH_VECTORS;
Russell Kinga5463cd2013-07-31 21:58:56 +01001329#else
1330 map.type = MT_LOW_VECTORS;
1331#endif
Russell Kingd111e8f2006-09-27 15:27:33 +01001332 create_mapping(&map);
1333
1334 if (!vectors_high()) {
1335 map.virtual = 0;
Russell King19accfd2013-07-04 11:40:32 +01001336 map.length = PAGE_SIZE * 2;
Russell Kingd111e8f2006-09-27 15:27:33 +01001337 map.type = MT_LOW_VECTORS;
1338 create_mapping(&map);
1339 }
1340
Russell King19accfd2013-07-04 11:40:32 +01001341 /* Now create a kernel read-only mapping */
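	/*
	 * This maps the second page allocated above at 0xffff1000; it
	 * holds the vector stubs, which need to sit just above the
	 * vector page but must not be writable.
	 */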
1342 map.pfn += 1;
1343 map.virtual = 0xffff0000 + PAGE_SIZE;
1344 map.length = PAGE_SIZE;
1345 map.type = MT_LOW_VECTORS;
1346 create_mapping(&map);
1347
Russell Kingd111e8f2006-09-27 15:27:33 +01001348 /*
1349 * Ask the machine support to map in the statically mapped devices.
1350 */
1351 if (mdesc->map_io)
1352 mdesc->map_io();
Maxime Ripardbc373242013-04-18 21:52:23 +02001353 else
1354 debug_ll_io_init();
Nicolas Pitre19b52ab2012-06-27 17:28:57 +01001355 fill_pmd_gaps();
Russell Kingd111e8f2006-09-27 15:27:33 +01001356
Rob Herringc2794432012-02-29 18:10:58 -06001357 /* Reserve fixed i/o space in VMALLOC region */
1358 pci_reserve_io();
1359
Russell Kingd111e8f2006-09-27 15:27:33 +01001360 /*
1361 * Finally flush the caches and tlb to ensure that we're in a
1362 * consistent state wrt the writebuffer. This also ensures that
1363 * any write-allocated cache lines in the vector page are written
1364 * back. After this point, we can start to touch devices again.
1365 */
1366 local_flush_tlb_all();
1367 flush_cache_all();
1368}
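/*
 * A minimal sketch (compiled out, illustrative only) of what a machine's
 * ->map_io() hook typically does: the device address 0x10009000 and the
 * virtual address 0xf8009000 are made-up example values, not taken from
 * any real board file.
 */
#if 0
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xf8009000UL,
		.pfn		= __phys_to_pfn(0x10009000),
		.length		= SZ_4K,
		.type		= MT_DEVICE,
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}
#endif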
1369
Nicolas Pitred73cd422008-09-15 16:44:55 -04001370static void __init kmap_init(void)
1371{
1372#ifdef CONFIG_HIGHMEM
Russell King4bb2e272010-07-01 18:33:29 +01001373 pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
1374 PKMAP_BASE, _PAGE_KERNEL_TABLE);
Nicolas Pitred73cd422008-09-15 16:44:55 -04001375#endif
1376}
1377
Russell Kinga2227122010-03-25 18:56:05 +00001378static void __init map_lowmem(void)
1379{
Russell King8df65162010-10-27 19:57:38 +01001380 struct memblock_region *reg;
Russell Kingebd49222013-10-24 08:12:39 +01001381 unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
1382 unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
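	/*
	 * kernel_x_start/kernel_x_end bound the section-aligned region
	 * that must remain executable: the kernel's text and init
	 * sections, which still contain code that runs during boot.
	 */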
Russell Kinga2227122010-03-25 18:56:05 +00001383
1384 /* Map all the lowmem memory banks. */
Russell King8df65162010-10-27 19:57:38 +01001385 for_each_memblock(memory, reg) {
1386 phys_addr_t start = reg->base;
1387 phys_addr_t end = start + reg->size;
1388 struct map_desc map;
Russell Kinga2227122010-03-25 18:56:05 +00001389
Marek Szyprowskic7909502011-12-29 13:09:51 +01001390 if (end > arm_lowmem_limit)
1391 end = arm_lowmem_limit;
Russell King8df65162010-10-27 19:57:38 +01001392 if (start >= end)
1393 break;
1394
Russell Kingebd49222013-10-24 08:12:39 +01001395 if (end < kernel_x_start || start >= kernel_x_end) {
1396 map.pfn = __phys_to_pfn(start);
1397 map.virtual = __phys_to_virt(start);
1398 map.length = end - start;
1399 map.type = MT_MEMORY_RWX;
Russell King8df65162010-10-27 19:57:38 +01001400
Russell Kingebd49222013-10-24 08:12:39 +01001401 create_mapping(&map);
1402 } else {
1403 /* This better cover the entire kernel */
1404 if (start < kernel_x_start) {
1405 map.pfn = __phys_to_pfn(start);
1406 map.virtual = __phys_to_virt(start);
1407 map.length = kernel_x_start - start;
1408 map.type = MT_MEMORY_RW;
1409
1410 create_mapping(&map);
1411 }
1412
1413 map.pfn = __phys_to_pfn(kernel_x_start);
1414 map.virtual = __phys_to_virt(kernel_x_start);
1415 map.length = kernel_x_end - kernel_x_start;
1416 map.type = MT_MEMORY_RWX;
1417
1418 create_mapping(&map);
1419
1420 if (kernel_x_end < end) {
1421 map.pfn = __phys_to_pfn(kernel_x_end);
1422 map.virtual = __phys_to_virt(kernel_x_end);
1423 map.length = end - kernel_x_end;
1424 map.type = MT_MEMORY_RW;
1425
1426 create_mapping(&map);
1427 }
1428 }
Russell Kinga2227122010-03-25 18:56:05 +00001429 }
1430}
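/*
 * A worked example with illustrative addresses: if the kernel image
 * occupies physical 0x80008000-0x80712000 and a lowmem bank covers
 * 0x80000000-0x90000000, the rounded kernel_x range becomes
 * 0x80000000-0x80800000; the bank is then mapped as one MT_MEMORY_RWX
 * region for that range and one MT_MEMORY_RW region for
 * 0x80800000-0x90000000 (the RW mapping before the kernel is empty here,
 * since the bank starts exactly at kernel_x_start).
 */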
1431
Santosh Shilimkara77e0c72013-07-31 12:44:46 -04001432#ifdef CONFIG_ARM_LPAE
1433/*
 1434 * early_paging_init() recreates the boot-time page table setup, allowing machines
1435 * to switch over to a high (>4G) address space on LPAE systems
1436 */
1437void __init early_paging_init(const struct machine_desc *mdesc,
1438 struct proc_info_list *procinfo)
1439{
1440 pmdval_t pmdprot = procinfo->__cpu_mm_mmu_flags;
1441 unsigned long map_start, map_end;
1442 pgd_t *pgd0, *pgdk;
1443 pud_t *pud0, *pudk, *pud_start;
1444 pmd_t *pmd0, *pmdk;
1445 phys_addr_t phys;
1446 int i;
1447
1448 if (!(mdesc->init_meminfo))
1449 return;
1450
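	/*
	 * From this point the function: lets the machine update its view of
	 * physical memory (init_meminfo, typically moving PHYS_OFFSET),
	 * patches the phys/virt translation constants, rebuilds the level 1
	 * table so each pgd entry points at the swapper PMDs, rewrites the
	 * kernel PMD entries with the new physical address, and finally
	 * switches TTBR0/TTBR1 over to the updated tables.
	 */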
1451 /* remap kernel code and data */
1452 map_start = init_mm.start_code;
1453 map_end = init_mm.brk;
1454
1455 /* get a handle on things... */
1456 pgd0 = pgd_offset_k(0);
1457 pud_start = pud0 = pud_offset(pgd0, 0);
1458 pmd0 = pmd_offset(pud0, 0);
1459
1460 pgdk = pgd_offset_k(map_start);
1461 pudk = pud_offset(pgdk, map_start);
1462 pmdk = pmd_offset(pudk, map_start);
1463
1464 mdesc->init_meminfo();
1465
1466 /* Run the patch stub to update the constants */
1467 fixup_pv_table(&__pv_table_begin,
1468 (&__pv_table_end - &__pv_table_begin) << 2);
1469
1470 /*
1471 * Cache cleaning operations for self-modifying code
1472 * We should clean the entries by MVA but running a
1473 * for loop over every pv_table entry pointer would
1474 * just complicate the code.
1475 */
1476 flush_cache_louis();
1477 dsb();
1478 isb();
1479
1480 /* remap level 1 table */
1481 for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
1482 set_pud(pud0,
1483 __pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
1484 pmd0 += PTRS_PER_PMD;
1485 }
1486
1487 /* remap pmds for kernel mapping */
1488 phys = __pa(map_start) & PMD_MASK;
1489 do {
1490 *pmdk++ = __pmd(phys | pmdprot);
1491 phys += PMD_SIZE;
1492 } while (phys < map_end);
1493
1494 flush_cache_all();
1495 cpu_switch_mm(pgd0, &init_mm);
1496 cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
1497 local_flush_bp_all();
1498 local_flush_tlb_all();
1499}
1500
1501#else
1502
1503void __init early_paging_init(const struct machine_desc *mdesc,
1504 struct proc_info_list *procinfo)
1505{
1506 if (mdesc->init_meminfo)
1507 mdesc->init_meminfo();
1508}
1509
1510#endif
1511
Russell Kingd111e8f2006-09-27 15:27:33 +01001512/*
1513 * paging_init() sets up the page tables, initialises the zone memory
1514 * maps, and sets up the zero page, bad page and bad page tables.
1515 */
Russell Kingff69a4c2013-07-26 14:55:59 +01001516void __init paging_init(const struct machine_desc *mdesc)
Russell Kingd111e8f2006-09-27 15:27:33 +01001517{
1518 void *zero_page;
1519
1520 build_mem_type_table();
Nicolas Pitre4b5f32c2008-10-06 13:24:40 -04001521 prepare_page_table();
Russell Kinga2227122010-03-25 18:56:05 +00001522 map_lowmem();
Marek Szyprowskic7909502011-12-29 13:09:51 +01001523 dma_contiguous_remap();
Russell Kingd111e8f2006-09-27 15:27:33 +01001524 devicemaps_init(mdesc);
Nicolas Pitred73cd422008-09-15 16:44:55 -04001525 kmap_init();
Joonsoo Kimde40614e2013-04-05 03:16:51 +01001526 tcm_init();
Russell Kingd111e8f2006-09-27 15:27:33 +01001527
1528 top_pmd = pmd_off_k(0xffff0000);
1529
Russell King3abe9d32010-03-25 17:02:59 +00001530 /* allocate the zero page. */
1531 zero_page = early_alloc(PAGE_SIZE);
Russell King2778f622010-07-09 16:27:52 +01001532
Russell King8d717a52010-05-22 19:47:18 +01001533 bootmem_init();
Russell King2778f622010-07-09 16:27:52 +01001534
Russell Kingd111e8f2006-09-27 15:27:33 +01001535 empty_zero_page = virt_to_page(zero_page);
Russell King421fe932009-10-25 10:23:04 +00001536 __flush_dcache_page(NULL, empty_zero_page);
Russell Kingd111e8f2006-09-27 15:27:33 +01001537}