/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	prot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			       (unsigned long) addr + size, phys_addr, prot)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);
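
/*
 * Usage sketch (illustrative only, not part of this file): a PCI driver
 * maps a BAR and then goes through the mmio helpers rather than
 * dereferencing the returned pointer.  REG_STATUS is a hypothetical
 * register offset; plain ioremap() is the flags == 0 wrapper around
 * __ioremap() provided by <asm/io.h>:
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + REG_STATUS);
 *	iounmap(regs);
 *
 * Note that the caller's pte flags are also stashed in the upper bits
 * of the vm_struct flags ("flags << 20" above), which is how iounmap()
 * later decides whether the direct mapping attributes must be reset.
 */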

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);

	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr + 1);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);
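
/*
 * Usage sketch (illustrative): _PAGE_PCD (page cache disable) and
 * _PAGE_PWT (page write-through) are the x86 pte bits that make the
 * mapping uncacheable.  A driver whose device must observe each write
 * immediately would use something like the following, where
 * WATCHDOG_PHYS and WD_KICK are hypothetical values:
 *
 *	void __iomem *wd = ioremap_nocache(WATCHDOG_PHYS, PAGE_SIZE);
 *
 *	if (wd) {
 *		writel(1, wd + WD_KICK);
 *		iounmap(wd);
 *	}
 */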

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel.  Reuse
	 * of the virtual address is prevented by leaving it in the global
	 * lists until we're done with it.  cpa takes care of the direct
	 * mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 get_vm_area_size(p) >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
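
/*
 * Note for callers (summarizing the code above): the address is masked
 * with PAGE_MASK before the vmlist lookup, so it is fine to hand back
 * the sub-page-offset pointer that __ioremap() returned; iounmap()
 * locates the containing vm area itself.  A hedged sketch, with a
 * purely hypothetical unaligned physical address:
 *
 *	void __iomem *p = ioremap(0x12345678, 0x10);
 *	...
 *	iounmap(p);
 */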

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
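
/*
 * With the early_param() hook above, booting with
 * "early_ioremap_debug" on the kernel command line enables the
 * KERN_DEBUG printouts and stack dumps in early_ioremap_init(),
 * early_ioremap() and early_iounmap() below.
 */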

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}
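
/*
 * These helpers hard-code the two-level, non-PAE i386 layout: bits
 * 31..22 of a virtual address select one of the 1024 pgd entries,
 * bits 21..12 select one of the 1024 ptes in a page table.  A worked
 * example (the address is purely illustrative):
 *
 *	addr      = 0xffd00000
 *	pgd index = (0xffd00000 >> 22) & 1023 = 1023
 *	pte index = (0xffd00000 >> 12) & 1023 = 256
 *
 * early_ioremap_pte() can index bm_pte directly because all BTMAP
 * slots live inside the single page table that early_ioremap_init()
 * installs (the WARN in early_ioremap_init() enforces this).
 */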

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		/* Re-instate, via set_fixmap(), every slot still present */
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	/* early_ioremap_nested is signed; "nesting" is not */
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
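
/*
 * Usage sketch (illustrative): boot code that needs a firmware table
 * before paging_init() maps it temporarily, copies what it needs and
 * unmaps again.  table_phys, table_len and table_copy are hypothetical:
 *
 *	void *p = early_ioremap(table_phys, table_len);
 *
 *	if (p) {
 *		memcpy(&table_copy, p, table_len);
 *		early_iounmap(p, table_len);
 *	}
 *
 * Mappings must be released in reverse order of creation: the nesting
 * level selects the FIX_BTMAP slot group for both map and unmap.
 */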

/*
 * fix_to_virt() calls this when handed an out-of-range index.  With a
 * constant in-range index the call is optimized away entirely;
 * defining the function here turns any use that survives into a
 * runtime warning rather than a link failure.
 */
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}