/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

enum ioremap_mode {
	IOR_MODE_UNCACHED,
	IOR_MODE_CACHED,
};

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif
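
/*
 * Illustrative only: __phys_addr() distinguishes the two 64-bit kernel
 * virtual ranges. Assuming the conventional __START_KERNEL_map value of
 * 0xffffffff80000000 and a hypothetical phys_base of 0:
 *
 *	__phys_addr(0xffffffff80100000) == 0x100000	(kernel text mapping)
 *	__phys_addr(PAGE_OFFSET + 0x2000) == 0x2000	(direct mapping)
 */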
38
Thomas Gleixner5f5192b2008-01-30 13:34:06 +010039int page_is_ram(unsigned long pagenr)
40{
41 unsigned long addr, end;
42 int i;
43
44 for (i = 0; i < e820.nr_map; i++) {
45 /*
46 * Not usable memory:
47 */
48 if (e820.map[i].type != E820_RAM)
49 continue;
Thomas Gleixner5f5192b2008-01-30 13:34:06 +010050 addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
51 end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
Thomas Gleixner950f9d92008-01-30 13:34:06 +010052
53 /*
54 * Sanity check: Some BIOSen report areas as RAM that
55 * are not. Notably the 640->1Mb area, which is the
56 * PCI BIOS area.
57 */
58 if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
59 end < (BIOS_END >> PAGE_SHIFT))
60 continue;
61
Thomas Gleixner5f5192b2008-01-30 13:34:06 +010062 if ((pagenr >= addr) && (pagenr < end))
63 return 1;
64 }
65 return 0;
66}
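
/*
 * Illustrative only, assuming the conventional BIOS_BEGIN/BIOS_END values
 * of 0xa0000 and 0x100000: if a broken BIOS reports 0xa0000-0xeffff as
 * E820_RAM, the sanity check above skips that entry (addr = 0xa0 and
 * end = 0xf0 lie entirely inside the VGA/PCI BIOS hole), so
 * page_is_ram(0xa0) still returns 0.
 */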

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long paddr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long vaddr = (unsigned long)__va(paddr);
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err, level;

	/* No change for pages after the last mapping */
	if ((paddr + size - 1) >= (max_pfn_mapped << PAGE_SHIFT))
		return 0;

	/*
	 * If there is no identity map for this address,
	 * change_page_attr_addr is unnecessary
	 */
	if (!lookup_address(vaddr, &level))
		return 0;

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		err = set_memory_uc(vaddr, nrpages);
		break;
	case IOR_MODE_CACHED:
		err = set_memory_wb(vaddr, nrpages);
		break;
	}
	if (!err)
		global_flush_tlb();

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			       enum ioremap_mode mode)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (offset = phys_addr >> PAGE_SHIFT; offset < max_pfn_mapped &&
	     (offset << PAGE_SHIFT) < last_addr; offset++) {
		if (page_is_ram(offset))
			return NULL;
	}

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case IOR_MODE_CACHED:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, prot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}

	if (ioremap_change_attr(phys_addr, size, mode) < 0) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
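
/*
 * Illustrative only, with made-up numbers: for __ioremap(0x12345678, 0x100)
 * the alignment step above yields
 *
 *	offset    = 0x678
 *	phys_addr = 0x12345000
 *	size      = PAGE_ALIGN(0x12345777 + 1) - 0x12345000 = 0x1000
 *
 * so a single page is mapped and the caller gets the mapping base plus 0x678.
 */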

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);

void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);
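
#if 0	/* Illustrative usage sketch only: the MMIO base address and the
	 * register offsets below are hypothetical, not real hardware. */
static int example_map_device_regs(void)
{
	void __iomem *regs;

	/* Map 4k of device registers uncached */
	regs = ioremap_nocache(0xfeb00000UL, 0x1000);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x04);	/* hypothetical enable register */
	printk(KERN_INFO "status: %08x\n", readl(regs + 0x08));

	iounmap(regs);			/* every successful ioremap needs this */
	return 0;
}
#endif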

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller
	 * ensures there isn't another iounmap for the same address
	 * in parallel. Reuse of the virtual address is prevented by
	 * leaving it in the global lists until we're done with it.
	 * cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	ioremap_change_attr(p->phys_addr, p->size, IOR_MODE_CACHED);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}
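
/*
 * Illustrative only: with 32-bit non-PAE paging, bits 31..22 of a virtual
 * address index the 1024-entry page directory and bits 21..12 index a page
 * table. For a made-up fixmap address of 0xffc01000:
 *
 *	early_ioremap_pgd: (0xffc01000 >> 22) & 1023 == 1023
 *	early_ioremap_pte: (0xffc01000 >> 12) & 1023 == 1
 */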

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		/* Carry over boot-time mappings that are still present */
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
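
#if 0	/* Illustrative boot-time usage sketch only; the physical address
	 * and length below are hypothetical, not a real firmware table. */
static void __init example_early_peek(void)
{
	void *p;

	/* Map a small firmware area before the normal ioremap is usable */
	p = early_ioremap(0xe0000UL, 64);
	if (!p)
		return;

	/* ... inspect the mapped bytes here ... */

	early_iounmap(p, 64);		/* must balance the early_ioremap */
}
#endif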

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */