/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000
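
/*
 * The 640k-1MB ISA hole delimited above is permanently covered by the
 * kernel's direct mapping, which is why __ioremap() below can return
 * phys_to_virt() for it instead of building a new mapping.
 */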

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
			| _PAGE_ACCESSED | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
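	/*
	 * The caller's caching flags are stashed in the upper bits of the
	 * vm_struct flags word; iounmap() below checks (p->flags >> 20)
	 * to decide whether the direct mapping needs to be restored.
	 */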
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			(unsigned long) addr + size, phys_addr, prot)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
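		/*
		 * change_page_attr() may already have modified some of the
		 * direct-mapping PTEs even when it fails, so flush the TLBs
		 * in either case.
		 */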
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);
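
/*
 * Typical use (a sketch; "CTRL_REG" and the 0x80000000/0x100 values are
 * illustrative, not part of this file):
 *
 *	void __iomem *regs = ioremap_nocache(0x80000000, 0x100);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	iounmap(regs);
 */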
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
			addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
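	/*
	 * A non-zero (p->flags >> 20) means __ioremap() was called with
	 * non-default caching flags, so the direct mapping of this RAM
	 * may have been changed and is reset to PAGE_KERNEL here.
	 */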
	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 get_vm_area_size(p) >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);


int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

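/*
 * These helpers assume classic two-level i386 paging (no PAE): the top
 * 10 address bits index the page directory and the next 10 index the
 * page table, hence the 1024-entry bm_pte above.
 */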
static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk("early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk("pgd %p != %p\n",
			pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk("FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk("early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}


int __initdata early_ioremap_nested;
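/*
 * Depth of outstanding early_ioremap()s: each nesting level gets its
 * own window of NR_FIX_BTMAPS fixmap slots, up to FIX_BTMAPS_NESTING
 * levels deep.
 */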

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk("early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
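	/*
	 * Higher fixmap indices correspond to lower virtual addresses, so
	 * decrementing idx below maps successive physical pages at
	 * successively higher virtual addresses within this nesting
	 * level's window.
	 */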
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk("early_iounmap(%p, %08lx) [%d]\n", addr, size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
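
/*
 * Typical boot-time use (a sketch; "table_phys" and "struct boot_table"
 * are illustrative, not part of this file):
 *
 *	struct boot_table tbl;
 *	void *p = early_ioremap(table_phys, sizeof(tbl));
 *	if (p) {
 *		memcpy(&tbl, p, sizeof(tbl));
 *		early_iounmap(p, sizeof(tbl));
 *	}
 */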

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}