/*
 * Trapped io support
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Intercept io operations by trapping.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
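/*
 * A board registers the bus window it wants emulated by describing it
 * with a struct trapped_io and calling register_trapped_io() from its
 * setup code.  Minimal sketch (illustrative only: the device name,
 * addresses and exact initializer layout are hypothetical; only the
 * fields this file dereferences are shown, see <asm/io_trapped.h> for
 * the real descriptor):
 *
 *	static struct resource dummy_resource = {
 *		.start	= 0xa8000000,
 *		.end	= 0xa8000000 + 0x8000 - 1,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	static struct trapped_io dummy_trapped_io __aligned(PAGE_SIZE) = {
 *		.resource		= &dummy_resource,
 *		.num_resources		= 1,
 *		.minimum_bus_width	= 16,
 *	};
 *
 *	register_trapped_io(&dummy_trapped_io);
 *
 * Accesses that fault inside the registered window are then fixed up by
 * handle_trapped_io() below, which replays them at no less than the
 * minimum bus width the device can cope with.
 */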
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/io_trapped.h>

#define TRAPPED_PAGES_MAX 16

#ifdef CONFIG_HAS_IOPORT
LIST_HEAD(trapped_io);
EXPORT_SYMBOL_GPL(trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
LIST_HEAD(trapped_mem);
EXPORT_SYMBOL_GPL(trapped_mem);
#endif
static DEFINE_SPINLOCK(trapped_lock);

static int trapped_io_disable __read_mostly;

static int __init trapped_io_setup(char *__unused)
{
	trapped_io_disable = 1;
	return 1;
}
__setup("noiotrap", trapped_io_setup);

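/*
 * register_trapped_io - set up an emulated window for a device
 * @tiop: page-aligned descriptor of the resources to trap
 *
 * Maps the descriptor page once per trapped page with PAGE_NONE
 * protection so that every access to the window faults, then queues
 * the descriptor on the trapped_io or trapped_mem list matching its
 * resource type.  Only a single resource type (IORESOURCE_IO or
 * IORESOURCE_MEM) may be used per descriptor.  Returns 0 on success,
 * -1 on failure.
 */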
int register_trapped_io(struct trapped_io *tiop)
{
	struct resource *res;
	unsigned long len = 0, flags = 0;
	struct page *pages[TRAPPED_PAGES_MAX];
	int k, n;

	if (unlikely(trapped_io_disable))
		return 0;

	/* structure must be page aligned */
	if ((unsigned long)tiop & (PAGE_SIZE - 1))
		goto bad;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len += roundup((res->end - res->start) + 1, PAGE_SIZE);
		flags |= res->flags;
	}

	/* support IORESOURCE_IO _or_ MEM, not both */
	if (hweight_long(flags) != 1)
		goto bad;

	n = len >> PAGE_SHIFT;

	if (n >= TRAPPED_PAGES_MAX)
		goto bad;

	for (k = 0; k < n; k++)
		pages[k] = virt_to_page(tiop);

	tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
	if (!tiop->virt_base)
		goto bad;

	len = 0;
	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
			(unsigned long)(tiop->virt_base + len),
			res->flags & IORESOURCE_IO ? "io" : "mmio",
			(unsigned long)res->start);
		len += roundup((res->end - res->start) + 1, PAGE_SIZE);
	}

	tiop->magic = IO_TRAPPED_MAGIC;
	INIT_LIST_HEAD(&tiop->list);
	spin_lock_irq(&trapped_lock);
#ifdef CONFIG_HAS_IOPORT
	if (flags & IORESOURCE_IO)
		list_add(&tiop->list, &trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
	if (flags & IORESOURCE_MEM)
		list_add(&tiop->list, &trapped_mem);
#endif
	spin_unlock_irq(&trapped_lock);

	return 0;
bad:
	pr_warning("unable to install trapped io filter\n");
	return -1;
}
EXPORT_SYMBOL_GPL(register_trapped_io);

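/*
 * match_trapped_io_handler - find the trapped virtual base for a resource
 * @list: trapped_io or trapped_mem
 * @offset: physical start address of the resource to look up
 * @size: size of the access (currently unused)
 *
 * Returns the trapped virtual address corresponding to @offset, or NULL
 * if no registered resource starts at that address.
 */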
void __iomem *match_trapped_io_handler(struct list_head *list,
				       unsigned long offset,
				       unsigned long size)
{
	unsigned long voffs;
	struct trapped_io *tiop;
	struct resource *res;
	int k, len;
	unsigned long flags;

	spin_lock_irqsave(&trapped_lock, flags);
	list_for_each_entry(tiop, list, list) {
		voffs = 0;
		for (k = 0; k < tiop->num_resources; k++) {
			res = tiop->resource + k;
			if (res->start == offset) {
				spin_unlock_irqrestore(&trapped_lock, flags);
				return tiop->virt_base + voffs;
			}

			len = (res->end - res->start) + 1;
			voffs += roundup(len, PAGE_SIZE);
		}
	}
	spin_unlock_irqrestore(&trapped_lock, flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(match_trapped_io_handler);

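/*
 * lookup_tiop - recover the trapped_io descriptor for a faulting address
 *
 * Walks the kernel page tables by hand.  The vmap() in
 * register_trapped_io() mapped the descriptor page itself at every page
 * of the trapped window, so the PTE behind any address in the window
 * points back at the page-aligned struct trapped_io.
 */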
static struct trapped_io *lookup_tiop(unsigned long address)
{
	pgd_t *pgd_k;
	pud_t *pud_k;
	pmd_t *pmd_k;
	pte_t *pte_k;
	pte_t entry;

	pgd_k = swapper_pg_dir + pgd_index(address);
	if (!pgd_present(*pgd_k))
		return NULL;

	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	pte_k = pte_offset_kernel(pmd_k, address);
	entry = *pte_k;

	return pfn_to_kaddr(pte_pfn(entry));
}

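/*
 * lookup_address - translate a trapped virtual address back to the
 * physical address of the underlying resource, walking the same
 * page-rounded layout that register_trapped_io() set up.  Returns 0
 * if the address lies beyond the last resource.
 */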
static unsigned long lookup_address(struct trapped_io *tiop,
				    unsigned long address)
{
	struct resource *res;
	unsigned long vaddr = (unsigned long)tiop->virt_base;
	unsigned long len;
	int k;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len = roundup((res->end - res->start) + 1, PAGE_SIZE);
		if (address < (vaddr + len))
			return res->start + (address - vaddr);
		vaddr += len;
	}
	return 0;
}

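/*
 * copy_word - move a single value between two addresses, reading
 * @src_len bytes from @src_addr and writing @dst_len bytes to @dst_addr
 * with the __raw_* accessors.  Unsupported widths read as 0 and write
 * nothing.  Returns the value moved.
 */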
static unsigned long long copy_word(unsigned long src_addr, int src_len,
				    unsigned long dst_addr, int dst_len)
{
	unsigned long long tmp = 0;

	switch (src_len) {
	case 1:
		tmp = __raw_readb(src_addr);
		break;
	case 2:
		tmp = __raw_readw(src_addr);
		break;
	case 4:
		tmp = __raw_readl(src_addr);
		break;
	case 8:
		tmp = __raw_readq(src_addr);
		break;
	}

	switch (dst_len) {
	case 1:
		__raw_writeb(tmp, dst_addr);
		break;
	case 2:
		__raw_writew(tmp, dst_addr);
		break;
	case 4:
		__raw_writel(tmp, dst_addr);
		break;
	case 8:
		__raw_writeq(tmp, dst_addr);
		break;
	}

	return tmp;
}

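/*
 * from_device - replay a trapped read: translate the trapped source
 * address to the real device address, read one word of at least the
 * device's minimum bus width from it and store @cnt bytes at @dst for
 * the caller.  Returns 0 on success, @cnt if the address could not be
 * translated.
 */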
static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
{
	struct trapped_io *tiop;
	unsigned long src_addr = (unsigned long)src;
	unsigned long long tmp;

	pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
	tiop = lookup_tiop(src_addr);
	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

	src_addr = lookup_address(tiop, src_addr);
	if (!src_addr)
		return cnt;

	tmp = copy_word(src_addr,
			max_t(unsigned long, cnt,
			      (tiop->minimum_bus_width / 8)),
			(unsigned long)dst, cnt);

	pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
	return 0;
}

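/*
 * to_device - replay a trapped write: read @cnt bytes from @src and
 * write them to the real device address at no less than the device's
 * minimum bus width.  Returns 0 on success, @cnt if the trapped address
 * could not be translated.
 */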
static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
{
	struct trapped_io *tiop;
	unsigned long dst_addr = (unsigned long)dst;
	unsigned long long tmp;

	pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
	tiop = lookup_tiop(dst_addr);
	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

	dst_addr = lookup_address(tiop, dst_addr);
	if (!dst_addr)
		return cnt;

	tmp = copy_word((unsigned long)src, cnt,
			dst_addr, max_t(unsigned long, cnt,
					(tiop->minimum_bus_width / 8)));

	pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
	return 0;
}

static struct mem_access trapped_io_access = {
	from_device,
	to_device,
};

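/*
 * handle_trapped_io - fault handler entry point
 *
 * Called from the fault handling code when a kernel-mode access hits a
 * trapped window.  Fetches the faulting instruction and lets the
 * unaligned access emulation re-execute it through trapped_io_access,
 * so the real device access is performed by from_device()/to_device()
 * above.  Returns 1 if the access was handled, 0 otherwise.
 */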
int handle_trapped_io(struct pt_regs *regs, unsigned long address)
{
	mm_segment_t oldfs;
	insn_size_t instruction;
	int tmp;

	if (trapped_io_disable)
		return 0;
	if (!lookup_tiop(address))
		return 0;

	WARN_ON(user_mode(regs));

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	if (copy_from_user(&instruction, (void *)(regs->pc),
			   sizeof(instruction))) {
		set_fs(oldfs);
		return 0;
	}

	tmp = handle_unaligned_access(instruction, regs,
				      &trapped_io_access, 1, address);
	set_fs(oldfs);
	return tmp == 0;
}