/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
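/*
 * A minimal usage sketch of how a driver consumes these mappings; the
 * physical address and register offset are made-up placeholders:
 *
 *	void __iomem *regs = ioremap(0x40000000, SZ_4K);
 *	if (regs) {
 *		u32 status = readl(regs + 0x04);
 *		writel(status | 1, regs + 0x04);
 *		iounmap(regs);
 *	}
 */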
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

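/*
 * Bring a task's view of the kernel page tables for the vmalloc region
 * back in step with init_mm.  unmap_area_sections() below increments
 * init_mm.context.kvm_seq after tearing down a mapping; callers compare
 * their mm's kvm_seq against it and, on mismatch, copy the current
 * kernel PGD entries across.  The loop retries if the sequence moves
 * again while the copy is in progress.
 */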
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

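/*
 * Map 'size' bytes starting at 'pfn' with 1MB section entries.  On ARM
 * each PGD entry covers 2MB and is backed by a pair of hardware section
 * descriptors, which is why pmd[0] and pmd[1] are written together,
 * each advancing the physical address by 1MB.
 */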
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

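/*
 * Map 'size' bytes using 16MB supersections.  A supersection descriptor
 * must be repeated in all 16 consecutive section entries it spans
 * (8 PGD entries of two sections each, hence the inner loop), and bits
 * [23:20] of the descriptor hold bits [35:32] of the 36-bit physical
 * address.
 */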
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

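/*
 * Core remapping routine.  It first tries to satisfy the request from a
 * pre-existing static mapping of the right memory type, refuses to map
 * system RAM (a source of aliasing problems on ARMv6+), and otherwise
 * allocates a vm area and maps it with supersections, sections or
 * individual pages depending on alignment and CPU support.
 */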
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
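
/*
 * The _caller variants thread the caller's return address through to
 * get_vm_area_caller() so that the resulting vm area is attributed to
 * the driver that requested the mapping (e.g. in /proc/vmallocinfo)
 * rather than to the ioremap machinery itself.
 */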

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);

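/*
 * A minimal usage sketch for the mtype-aware interface (the physical
 * address and register offset are made-up placeholders; MT_DEVICE is
 * defined via asm/mach/map.h):
 *
 *	void __iomem *base = __arm_ioremap(0x48000000, SZ_4K, MT_DEVICE);
 *	if (!base)
 *		return -ENOMEM;
 *	writel(1, base + 0x10);		(hypothetical control register)
 *	iounmap(base);
 */
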
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed, for example, for
 * reprogramming source clocks that would affect normal memory. Please
 * see CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY;
	else
		mtype = MT_MEMORY_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}

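/*
 * Tear down a mapping created above.  Static (boot-time) mappings are
 * deliberately left untouched, and section-based mappings get their
 * PMD entries cleared by hand since the generic vmalloc code only
 * understands page-level mappings.
 */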
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* If this is a static mapping we must leave it alone */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}
#ifndef CONFIG_SMP
		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);