Johannes Berg | ca2e334 | 2021-03-05 13:19:52 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Copyright (C) 2021 Intel Corporation |
| 4 | * Author: Johannes Berg <johannes@sipsolutions.net> |
| 5 | */ |
| 6 | #include <linux/types.h> |
| 7 | #include <linux/slab.h> |
| 8 | #include <linux/logic_iomem.h> |
| 9 | |
/* A registered emulated IOMEM region: a resource range plus backend ops. */
struct logic_iomem_region {
	const struct resource *res;	/* caller-owned resource describing the range */
	const struct logic_iomem_region_ops *ops;	/* map/unmap callbacks for the region */
	struct list_head list;		/* linkage on regions_list */
};
| 15 | |
/* One currently-mapped area (a slot in mapped_areas); ops == NULL means the slot is free. */
struct logic_iomem_area {
	const struct logic_iomem_ops *ops;	/* per-access read/write ops; NULL if unused */
	void *priv;	/* backend's private data for this mapping */
};
| 20 | |
/*
 * Fake ioremap cookies are laid out as [IOREMAP_BIAS | area index | offset].
 * Each area covers at most 1 << AREA_SHIFT bytes (16 MiB) and MAX_AREAS is
 * however many such areas fit into the low 32 bits of an address.
 */
#define AREA_SHIFT 24
#define MAX_AREA_SIZE (1 << AREA_SHIFT)
#define MAX_AREAS ((1ULL<<32) / MAX_AREA_SIZE)
#define AREA_BITS ((MAX_AREAS - 1) << AREA_SHIFT)
#define AREA_MASK (MAX_AREA_SIZE - 1)
#ifdef CONFIG_64BIT
/* recognizable poison-style bias in the top 32 bits of the cookie */
#define IOREMAP_BIAS 0xDEAD000000000000UL
#define IOREMAP_MASK 0xFFFFFFFF00000000UL
#else
/* on 32-bit there's no room for a distinguishing bias in the address */
#define IOREMAP_BIAS 0
#define IOREMAP_MASK 0
#endif

/* protects regions_list and allocation/release of mapped_areas slots */
static DEFINE_MUTEX(regions_mtx);
static LIST_HEAD(regions_list);
static struct logic_iomem_area mapped_areas[MAX_AREAS];
| 37 | |
| 38 | int logic_iomem_add_region(struct resource *resource, |
| 39 | const struct logic_iomem_region_ops *ops) |
| 40 | { |
| 41 | struct logic_iomem_region *rreg; |
| 42 | int err; |
| 43 | |
| 44 | if (WARN_ON(!resource || !ops)) |
| 45 | return -EINVAL; |
| 46 | |
| 47 | if (WARN_ON((resource->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM)) |
| 48 | return -EINVAL; |
| 49 | |
| 50 | rreg = kzalloc(sizeof(*rreg), GFP_KERNEL); |
| 51 | if (!rreg) |
| 52 | return -ENOMEM; |
| 53 | |
| 54 | err = request_resource(&iomem_resource, resource); |
| 55 | if (err) { |
| 56 | kfree(rreg); |
| 57 | return -ENOMEM; |
| 58 | } |
| 59 | |
| 60 | mutex_lock(®ions_mtx); |
| 61 | rreg->res = resource; |
| 62 | rreg->ops = ops; |
| 63 | list_add_tail(&rreg->list, ®ions_list); |
| 64 | mutex_unlock(®ions_mtx); |
| 65 | |
| 66 | return 0; |
| 67 | } |
| 68 | EXPORT_SYMBOL(logic_iomem_add_region); |
| 69 | |
| 70 | #ifndef CONFIG_LOGIC_IOMEM_FALLBACK |
| 71 | static void __iomem *real_ioremap(phys_addr_t offset, size_t size) |
| 72 | { |
| 73 | WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n", |
| 74 | (unsigned long long)offset, size); |
| 75 | return NULL; |
| 76 | } |
| 77 | |
| 78 | static void real_iounmap(void __iomem *addr) |
| 79 | { |
| 80 | WARN(1, "invalid iounmap for addr 0x%llx\n", |
| 81 | (unsigned long long)addr); |
| 82 | } |
| 83 | #endif /* CONFIG_LOGIC_IOMEM_FALLBACK */ |
| 84 | |
| 85 | void __iomem *ioremap(phys_addr_t offset, size_t size) |
| 86 | { |
| 87 | void __iomem *ret = NULL; |
| 88 | struct logic_iomem_region *rreg, *found = NULL; |
| 89 | int i; |
| 90 | |
| 91 | mutex_lock(®ions_mtx); |
| 92 | list_for_each_entry(rreg, ®ions_list, list) { |
| 93 | if (rreg->res->start > offset) |
| 94 | continue; |
| 95 | if (rreg->res->end < offset + size - 1) |
| 96 | continue; |
| 97 | found = rreg; |
| 98 | break; |
| 99 | } |
| 100 | |
| 101 | if (!found) |
| 102 | goto out; |
| 103 | |
| 104 | for (i = 0; i < MAX_AREAS; i++) { |
| 105 | long offs; |
| 106 | |
| 107 | if (mapped_areas[i].ops) |
| 108 | continue; |
| 109 | |
| 110 | offs = rreg->ops->map(offset - found->res->start, |
| 111 | size, &mapped_areas[i].ops, |
| 112 | &mapped_areas[i].priv); |
| 113 | if (offs < 0) { |
| 114 | mapped_areas[i].ops = NULL; |
| 115 | break; |
| 116 | } |
| 117 | |
| 118 | if (WARN_ON(!mapped_areas[i].ops)) { |
| 119 | mapped_areas[i].ops = NULL; |
| 120 | break; |
| 121 | } |
| 122 | |
| 123 | ret = (void __iomem *)(IOREMAP_BIAS + (i << AREA_SHIFT) + offs); |
| 124 | break; |
| 125 | } |
| 126 | out: |
| 127 | mutex_unlock(®ions_mtx); |
| 128 | if (ret) |
| 129 | return ret; |
| 130 | return real_ioremap(offset, size); |
| 131 | } |
| 132 | EXPORT_SYMBOL(ioremap); |
| 133 | |
/*
 * Decode a fake __iomem cookie back to its mapped_areas slot.
 * Returns NULL (after warning) if the address doesn't carry the ioremap
 * bias, or silently if the decoded slot isn't in use - callers then fall
 * back to the real_* handlers.
 *
 * NOTE(review): on 32-bit, IOREMAP_MASK/IOREMAP_BIAS are both 0, so the
 * bias check accepts any address - confirm that's intended.
 */
static inline struct logic_iomem_area *
get_area(const volatile void __iomem *addr)
{
	unsigned long a = (unsigned long)addr;
	unsigned int idx;

	if (WARN_ON((a & IOREMAP_MASK) != IOREMAP_BIAS))
		return NULL;

	/* extract the slot index from the middle bits of the cookie */
	idx = (a & AREA_BITS) >> AREA_SHIFT;

	/* ops != NULL marks the slot as currently mapped;
	 * NOTE(review): read without regions_mtx - verify racing iounmap
	 * is harmless here
	 */
	if (mapped_areas[idx].ops)
		return &mapped_areas[idx];

	return NULL;
}
| 150 | |
/**
 * iounmap - tear down a mapping created by the ioremap() above
 * @addr: fake __iomem cookie previously returned by ioremap()
 *
 * Addresses that don't decode to a mapped area are handed to
 * real_iounmap() (which, without a fallback configured, just warns).
 */
void iounmap(void __iomem *addr)
{
	struct logic_iomem_area *area = get_area(addr);

	if (!area) {
		real_iounmap(addr);
		return;
	}

	/* let the backend release whatever map() set up (optional op) */
	if (area->ops->unmap)
		area->ops->unmap(area->priv);

	/* clear the slot under the lock so ioremap() can reuse it */
	mutex_lock(&regions_mtx);
	area->ops = NULL;
	area->priv = NULL;
	mutex_unlock(&regions_mtx);
}
EXPORT_SYMBOL(iounmap);
| 169 | |
| 170 | #ifndef CONFIG_LOGIC_IOMEM_FALLBACK |
| 171 | #define MAKE_FALLBACK(op, sz) \ |
| 172 | static u##sz real_raw_read ## op(const volatile void __iomem *addr) \ |
| 173 | { \ |
| 174 | WARN(1, "Invalid read" #op " at address %llx\n", \ |
| 175 | (unsigned long long)addr); \ |
| 176 | return (u ## sz)~0ULL; \ |
| 177 | } \ |
| 178 | \ |
| 179 | void real_raw_write ## op(u ## sz val, volatile void __iomem *addr) \ |
| 180 | { \ |
| 181 | WARN(1, "Invalid writeq" #op " of 0x%llx at address %llx\n", \ |
| 182 | (unsigned long long)val, (unsigned long long)addr); \ |
| 183 | } \ |
| 184 | |
| 185 | MAKE_FALLBACK(b, 8); |
| 186 | MAKE_FALLBACK(w, 16); |
| 187 | MAKE_FALLBACK(l, 32); |
| 188 | #ifdef CONFIG_64BIT |
| 189 | MAKE_FALLBACK(q, 64); |
| 190 | #endif |
| 191 | |
| 192 | static void real_memset_io(volatile void __iomem *addr, int value, size_t size) |
| 193 | { |
| 194 | WARN(1, "Invalid memset_io at address 0x%llx\n", |
| 195 | (unsigned long long)addr); |
| 196 | } |
| 197 | |
| 198 | static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr, |
| 199 | size_t size) |
| 200 | { |
| 201 | WARN(1, "Invalid memcpy_fromio at address 0x%llx\n", |
| 202 | (unsigned long long)addr); |
| 203 | |
| 204 | memset(buffer, 0xff, size); |
| 205 | } |
| 206 | |
| 207 | static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer, |
| 208 | size_t size) |
| 209 | { |
| 210 | WARN(1, "Invalid memcpy_toio at address 0x%llx\n", |
| 211 | (unsigned long long)addr); |
| 212 | } |
| 213 | #endif /* CONFIG_LOGIC_IOMEM_FALLBACK */ |
| 214 | |
/*
 * Generate the exported __raw_read{b,w,l,q} / __raw_write{b,w,l,q}
 * accessors: decode the fake address to its area, then dispatch to the
 * area's read/write op with the in-area offset (low AREA_MASK bits) and
 * the access width in bytes (sz / 8). Addresses not belonging to a
 * logic_iomem area are routed to the real_raw_* fallbacks.
 */
#define MAKE_OP(op, sz) 						\
u##sz __raw_read ## op(const volatile void __iomem *addr)		\
{									\
	struct logic_iomem_area *area = get_area(addr);			\
									\
	if (!area)							\
		return real_raw_read ## op(addr);			\
									\
	return (u ## sz) area->ops->read(area->priv,			\
					 (unsigned long)addr & AREA_MASK,\
					 sz / 8);			\
}									\
EXPORT_SYMBOL(__raw_read ## op);					\
									\
void __raw_write ## op(u ## sz val, volatile void __iomem *addr)	\
{									\
	struct logic_iomem_area *area = get_area(addr);			\
									\
	if (!area) {							\
		real_raw_write ## op(val, addr);			\
		return;							\
	}								\
									\
	area->ops->write(area->priv,					\
			 (unsigned long)addr & AREA_MASK,		\
			 sz / 8, val);					\
}									\
EXPORT_SYMBOL(__raw_write ## op)

MAKE_OP(b, 8);
MAKE_OP(w, 16);
MAKE_OP(l, 32);
#ifdef CONFIG_64BIT
MAKE_OP(q, 64);
#endif
| 250 | |
| 251 | void memset_io(volatile void __iomem *addr, int value, size_t size) |
| 252 | { |
| 253 | struct logic_iomem_area *area = get_area(addr); |
| 254 | unsigned long offs, start; |
| 255 | |
| 256 | if (!area) { |
| 257 | real_memset_io(addr, value, size); |
| 258 | return; |
| 259 | } |
| 260 | |
| 261 | start = (unsigned long)addr & AREA_MASK; |
| 262 | |
| 263 | if (area->ops->set) { |
| 264 | area->ops->set(area->priv, start, value, size); |
| 265 | return; |
| 266 | } |
| 267 | |
| 268 | for (offs = 0; offs < size; offs++) |
| 269 | area->ops->write(area->priv, start + offs, 1, value); |
| 270 | } |
| 271 | EXPORT_SYMBOL(memset_io); |
| 272 | |
| 273 | void memcpy_fromio(void *buffer, const volatile void __iomem *addr, |
| 274 | size_t size) |
| 275 | { |
| 276 | struct logic_iomem_area *area = get_area(addr); |
| 277 | u8 *buf = buffer; |
| 278 | unsigned long offs, start; |
| 279 | |
| 280 | if (!area) { |
| 281 | real_memcpy_fromio(buffer, addr, size); |
| 282 | return; |
| 283 | } |
| 284 | |
| 285 | start = (unsigned long)addr & AREA_MASK; |
| 286 | |
| 287 | if (area->ops->copy_from) { |
| 288 | area->ops->copy_from(area->priv, buffer, start, size); |
| 289 | return; |
| 290 | } |
| 291 | |
| 292 | for (offs = 0; offs < size; offs++) |
| 293 | buf[offs] = area->ops->read(area->priv, start + offs, 1); |
| 294 | } |
| 295 | EXPORT_SYMBOL(memcpy_fromio); |
| 296 | |
| 297 | void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size) |
| 298 | { |
| 299 | struct logic_iomem_area *area = get_area(addr); |
| 300 | const u8 *buf = buffer; |
| 301 | unsigned long offs, start; |
| 302 | |
| 303 | if (!area) { |
| 304 | real_memcpy_toio(addr, buffer, size); |
| 305 | return; |
| 306 | } |
| 307 | |
| 308 | start = (unsigned long)addr & AREA_MASK; |
| 309 | |
| 310 | if (area->ops->copy_to) { |
| 311 | area->ops->copy_to(area->priv, start, buffer, size); |
| 312 | return; |
| 313 | } |
| 314 | |
| 315 | for (offs = 0; offs < size; offs++) |
| 316 | area->ops->write(area->priv, start + offs, 1, buf[offs]); |
| 317 | } |
| 318 | EXPORT_SYMBOL(memcpy_toio); |