/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
			       pgprot_t prot)
{
	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
	int err, level;

	/* No change for pages after the last mapping */
	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
		return 0;

	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	vaddr = (unsigned long) __va(phys_addr);

	/*
	 * If there is no identity map for this address,
	 * change_page_attr_addr is unnecessary
	 */
	if (!lookup_address(vaddr, &level))
		return 0;

	/*
	 * Must use an address here and not struct page because the
	 * phys addr can be in a hole between nodes and not have a
	 * memmap entry.
	 */
	err = change_page_attr_addr(vaddr, npages, prot);
	if (!err)
		global_flush_tlb();

	return err;
}

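/*
 * Why this matters, as an illustrative sketch (not from the original
 * source): without the fixup above, the same physical page could end
 * up mapped with conflicting cache attributes, e.g.
 *
 *	void __iomem *p = ioremap_nocache(phys, PAGE_SIZE);
 *	char *c = __va(phys);		// direct mapping, cached
 *
 * where 'phys' stands for some reserved RAM address below
 * max_pfn_mapped. Reads through 'c' could then be served from the
 * cache while 'p' bypasses it, which x86 does not guarantee to keep
 * coherent, so the direct mapping's attributes are changed to match.
 */
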
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		vunmap((void __force *) addr);
		return NULL;
	}

	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
}
EXPORT_SYMBOL(ioremap_nocache);

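/*
 * A usage sketch (the register offset and BAR index are illustrative
 * only, not part of this file):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + MY_CTRL_REG);
 *	...
 *	iounmap(regs);
 */
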
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel. Reuse
	 * of the virtual address is prevented by leaving it in the
	 * global lists until we're done with it. cpa takes care of the
	 * direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);


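/*
 * Set "early_ioremap_debug" on the kernel command line to enable the
 * debug printks in this file.
 */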
int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

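/*
 * bm_pte is a single page-sized page table covering the 4MB slot of
 * virtual address space that holds the boot-time fixmaps. The two
 * helpers below assume the two-level, non-PAE layout: the page
 * directory index is the top 10 address bits, the table index the
 * next 10.
 */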
static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}


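/*
 * Nesting depth of currently live early_ioremap() mappings. At most
 * FIX_BTMAPS_NESTING mappings may be active at once; each nesting
 * level owns its own NR_FIX_BTMAPS-page slot of the fixmap area.
 */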
int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "Please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

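/*
 * Boot-time usage sketch (the address range is illustrative): peek at
 * a BIOS table before the normal ioremap() machinery is available:
 *
 *	void *map = early_ioremap(0xf0000, 0x10000);
 *	if (map) {
 *		... scan for the table signature ...
 *		early_iounmap(map, 0x10000);
 *	}
 *
 * Mappings must be torn down in LIFO order: early_iounmap() derives
 * the fixmap slot from the decremented nesting count.
 */
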
void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

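/*
 * fix_to_virt() in <asm/fixmap.h> calls this for an out-of-range
 * fixmap index. For valid compile-time-constant indices the call is
 * optimized away entirely, so getting here means some caller passed a
 * bad, runtime-computed index; complain loudly.
 */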
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}