/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in the vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
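
/*
 * Illustrative sketch only (not taken from an in-tree caller): a user of
 * ioremap_page() is expected to supply its own page-aligned virtual and
 * physical addresses and a mem_type describing the attributes it wants,
 * for instance the one returned by get_mem_type(MT_DEVICE):
 *
 *	const struct mem_type *mtype = get_mem_type(MT_DEVICE);
 *
 *	if (!mtype || ioremap_page(virt, phys, mtype))
 *		pr_err("failed to map page at %#lx\n", phys);
 *
 * Here virt and phys are hypothetical addresses provided by the caller.
 */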

/*
 * Copy the kernel's vmalloc-area page table entries into this mm, and
 * retry until the copy was made against a stable init_mm kvm_seq, so that
 * entries cleared by unmap_area_sections() are not left stale here.
 */
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates an extra 4K guard page, so we
 * need to mask the size back to a 1MB boundary or we will overflow in the
 * loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
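
/*
 * Note on the layout written above (summarising the ARM ARM for the
 * reader's convenience): a supersection covers 16MB and must be repeated
 * in 16 consecutive first-level entries, which is why the inner loop
 * stores pmd[0]/pmd[1] eight times.  Bits [23:20] of a supersection
 * descriptor carry physical address bits [35:32], which is what the
 * ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20 term encodes.
 */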
#endif

void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return __arm_ioremap_caller(phys_addr, size, mtype,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
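
/*
 * Illustrative usage sketch (the physical address and register offset are
 * invented, not taken from a real platform): a driver maps a block of
 * device registers, touches them only through the MMIO accessors, and
 * unmaps the region when it is done:
 *
 *	void __iomem *regs = __arm_ioremap(0x10000000, SZ_4K, MT_DEVICE);
 *
 *	if (regs) {
 *		writel(1, regs + 0x04);
 *		__iounmap(regs);
 *	}
 *
 * Most drivers reach this code via the generic ioremap()/iounmap()
 * wrappers rather than by calling __arm_ioremap() directly.
 */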

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed, for example, when
 * reprogramming source clocks that would affect normal memory. Please
 * see CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY;
	else
		mtype = MT_MEMORY_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
				    __builtin_return_address(0));
}
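
/*
 * Illustrative sketch only (sram_phys and the size are hypothetical): a
 * platform that wants to run a small routine from external SRAM might map
 * it cached, copy the code in and then jump to it:
 *
 *	void __iomem *sram = __arm_ioremap_exec(sram_phys, SZ_8K, true);
 *
 * The CONFIG_GENERIC_ALLOCATOR gen_pool mentioned above is one way of
 * parcelling such a mapping out to individual users.
 */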

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here because we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);