/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

enum ioremap_mode {
        IOR_MODE_UNCACHED,
        IOR_MODE_CACHED,
};

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif
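
/*
 * Illustrative sketch (not part of the original file): on 64-bit,
 * __phys_addr() is the out-of-line helper behind __pa(). A kernel-image
 * address (>= __START_KERNEL_map) and the corresponding direct-mapping
 * address (PAGE_OFFSET based) should resolve to the same physical
 * address. The function and variable names below are invented for the
 * example only.
 */
#if 0
static void example_phys_addr_roundtrip(void)
{
        static int example_var;
        unsigned long phys = __phys_addr((unsigned long)&example_var);

        /* The same physical address, reached through the direct mapping: */
        WARN_ON(phys != __phys_addr((unsigned long)__va(phys)));
}
#endif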

int page_is_ram(unsigned long pagenr)
{
        unsigned long addr, end;
        int i;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                /*
                 * Sanity check: Some BIOSen report areas as RAM that
                 * are not. Notably the 640->1Mb area, which is the
                 * PCI BIOS area.
                 */
                if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    end < (BIOS_END >> PAGE_SHIFT))
                        continue;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               enum ioremap_mode mode)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (mode) {
        case IOR_MODE_UNCACHED:
        default:
                err = set_memory_uc(vaddr, nrpages);
                break;
        case IOR_MODE_CACHED:
                err = set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
                               enum ioremap_mode mode)
{
        unsigned long pfn, offset, last_addr, vaddr;
        struct vm_struct *area;
        pgprot_t prot;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
             (pfn << PAGE_SHIFT) < last_addr; pfn++) {
                if (page_is_ram(pfn) && pfn_valid(pfn) &&
                    !PageReserved(pfn_to_page(pfn)))
                        return NULL;
        }

        switch (mode) {
        case IOR_MODE_UNCACHED:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case IOR_MODE_CACHED:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                remove_vm_area((void *)(vaddr & PAGE_MASK));
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, mode) < 0) {
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);

void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);
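
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * driver might use ioremap_nocache()/iounmap() to access a memory-mapped
 * status register. The physical address, register offset and function
 * name are invented for the example only.
 */
#if 0
static u32 example_read_device_status(void)
{
        void __iomem *regs;
        u32 status;

        /* Map one page of device registers with caching disabled */
        regs = ioremap_nocache(0xfebf0000UL, PAGE_SIZE);
        if (!regs)
                return 0;

        status = readl(regs + 0x04);    /* hypothetical status register */

        iounmap(regs);
        return status;
}
#endif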

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __attribute__((aligned(PAGE_SIZE)));

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        pgd_t *pgd = &swapper_pg_dir[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        set_pmd(pmd, __pmd(__pa(bm_pte) | _PAGE_TABLE));

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pt(__pa(pmd) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        unsigned int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(nesting < 0);

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}
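
/*
 * Illustrative sketch (not part of the original file): how early boot code
 * might use early_ioremap()/early_iounmap() to peek at a firmware table
 * before the normal ioremap() machinery is available. The physical address
 * parameter and function name are invented for the example only.
 */
#if 0
static u32 __init example_read_boot_table(unsigned long table_phys)
{
        u32 signature;
        void *table;

        table = early_ioremap(table_phys, sizeof(signature));
        if (!table)
                return 0;

        signature = *(u32 *)table;      /* e.g. a table signature word */
        early_iounmap(table, sizeof(signature));

        return signature;
}
#endif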

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */