/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
			       pgprot_t prot)
{
	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
	int err, level;

	/* No change for pages after the last mapping */
	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
		return 0;

	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	vaddr = (unsigned long) __va(phys_addr);

	/*
	 * If there is no identity map for this address,
	 * change_page_attr_addr is unnecessary
	 */
	if (!lookup_address(vaddr, &level))
		return 0;

	/*
	 * Must use an address here and not struct page because the
	 * phys addr can be in a hole between nodes and not have a
	 * memmap entry.
	 */
	err = change_page_attr_addr(vaddr, npages, prot);
	if (!err)
		global_flush_tlb();

	return err;
}
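
/*
 * Why the alias fixup above is needed (explanatory note paraphrasing the
 * x86 architecture rules, not text from the original file): the same
 * physical page must not be mapped with conflicting memory types. If a
 * driver ioremaps a stretch of RAM uncached while the kernel's direct
 * mapping still maps it write-back, the CPU may serve the two virtual
 * aliases from different places (cache vs. memory), so the direct
 * mapping has to be switched to the same attributes as the new ioremap
 * mapping.
 */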

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		vunmap((void __force *) addr);
		return NULL;
	}

	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
		vunmap((void __force *) addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
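
/*
 * Worked example of the alignment handling above (illustrative numbers
 * only): for __ioremap(0xe0001234, 0x100, 0), offset becomes 0x234,
 * phys_addr is rounded down to 0xe0001000, and size is rounded up to one
 * page (0x1000). The PTEs map the whole page, and the returned pointer
 * is the page-aligned virtual address plus 0x234, so the caller sees a
 * pointer straight to its 0x100-byte region without knowing about the
 * alignment fixup.
 */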

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
}
EXPORT_SYMBOL(ioremap_nocache);
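
/*
 * Minimal usage sketch (not from this file; the device address, size and
 * register offset are made up for illustration):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(0xfebf0000, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x04);	(poke a hypothetical control register)
 *	iounmap(regs);
 *
 * Real drivers take the address and length from a struct resource rather
 * than hard-coding them.
 */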

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel. Reuse
	 * of the virtual address is prevented by leaving it in the
	 * global lists until we're done with it. cpa takes care of the
	 * direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
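
/*
 * Note: early_param() means the flag above is enabled by passing
 * "early_ioremap_debug" on the kernel command line, and it takes effect
 * during parse_early_param(), i.e. before the regular __setup()
 * parameters are processed.
 */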

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}
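
/*
 * The index arithmetic above, spelled out (this describes standard
 * two-level i386 paging without PAE, which is what the shifts assume):
 * a virtual address splits into a 10-bit pgd index, a 10-bit pte index
 * and a 12-bit page offset. "addr >> 22" selects one of 1024 pgd slots,
 * each covering 4MB; "(addr >> PAGE_SHIFT) & 1023" selects one of the
 * 1024 PTEs inside that 4MB. E.g. addr == 0xffe00000 gives pgd index
 * 1023 and pte index 512. bm_pte is a single page-aligned page table,
 * so it can back exactly one pgd slot: one 4MB window for the boot-time
 * fixmaps.
 */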

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		/*
		 * Re-install entries that are present. (The original
		 * test "!*pte & _PAGE_PRESENT" applied ! before &, which
		 * both broke the precedence and inverted the intent.)
		 */
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}
329
Ingo Molnar1b42f512008-01-30 13:33:45 +0100330
331int __initdata early_ioremap_nested;
332
Ingo Molnard690b2a2008-01-30 13:33:47 +0100333static int __init check_early_ioremap_leak(void)
334{
335 if (!early_ioremap_nested)
336 return 0;
337
338 printk(KERN_WARNING
Thomas Gleixner91eebf42008-01-30 13:34:05 +0100339 "Debug warning: early ioremap leak of %d areas detected.\n",
340 early_ioremap_nested);
Ingo Molnard690b2a2008-01-30 13:33:47 +0100341 printk(KERN_WARNING
Thomas Gleixner91eebf42008-01-30 13:34:05 +0100342 "please boot with early_ioremap_debug and report the dmesg.\n");
Ingo Molnard690b2a2008-01-30 13:33:47 +0100343 WARN_ON(1);
344
345 return 1;
346}
347late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}
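
/*
 * Slot layout sketch for the nesting logic above (the constants are
 * assumptions from the usual i386 fixmap setup, not defined in this
 * file): with NR_FIX_BTMAPS == 16 and FIX_BTMAPS_NESTING == 4, nesting
 * level n gets the 16 fixmap slots starting at FIX_BTMAP_BEGIN - 16*n
 * and counting downwards, so up to four early_ioremap() calls can be
 * live at once, each covering at most 16 pages (64KB).
 */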

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	/* nesting is unsigned, so check the counter itself for underflow */
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
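
/*
 * Typical boot-time usage sketch (the physical address and length are
 * hypothetical; real callers get them from firmware tables):
 *
 *	void *map;
 *
 *	map = early_ioremap(0x000f5a40, 64);
 *	if (map) {
 *		(inspect the 64 mapped bytes, e.g. checksum a table)
 *		early_iounmap(map, 64);
 *	}
 *
 * Calls must be made while system_state == SYSTEM_BOOTING and must be
 * released in LIFO order, since early_iounmap() simply decrements the
 * nesting count to find the slots to clear.
 */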

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}