/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H

#define ARCH_HAS_IOREMAP_WC

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>

#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>

/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)

# define __relaxed_ioswabb ioswabb
# define __relaxed_ioswabw ioswabw
# define __relaxed_ioswabl ioswabl
# define __relaxed_ioswabq ioswabq

/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff

/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For the sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern unsigned long mips_io_port_base;

static inline void set_io_port_base(unsigned long base)
{
	mips_io_port_base = base;
}
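
/*
 * Example, for illustration only (the platform address below is made up):
 * platform setup code typically points the port base at an uncached
 * KSEG1 window covering the system's PCI/ISA I/O space, after which the
 * ordinary port accessors become plain loads and stores:
 *
 *	set_io_port_base(CKSEG1ADDR(0x18000000));
 *	outb(0xff, 0x70);
 *	val = inb(0x71);
 */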

/*
 * Provide the necessary definitions for generic iomap. We make use of
 * mips_io_port_base for iomap(), but we don't reserve any low addresses for
 * use with I/O ports.
 */

#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET	mips_io_port_base
#define PIO_MASK	IO_SPACE_LIMIT
#define PIO_RESERVED	0x0UL

/*
 * Enforce in-order execution of data I/O.  In the MIPS architecture
 * these are equivalent to corresponding platform-specific memory
 * barriers defined in <asm/barrier.h>.  API pinched from PowerPC,
 * with sync additionally defined.
 */
#define iobarrier_rw() mb()
#define iobarrier_r() rmb()
#define iobarrier_w() wmb()
#define iobarrier_sync() iob()

/*
 * virt_to_phys	- map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return __pa(address);
}

/*
 * phys_to_virt	- map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
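
/*
 * Illustrative sketch (only meaningful for directly-mapped kernel
 * memory): for a kmalloc()ed buffer the two conversions are inverses
 * of each other:
 *
 *	void *buf = kmalloc(64, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);
 *
 *	BUG_ON(phys_to_virt(pa) != buf);
 */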

/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return virt_to_phys(address);
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return phys_to_virt(address);
}

/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt

/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem *__ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);

static inline void __iomem *__ioremap_mode(phys_addr_t offset, unsigned long size,
	unsigned long flags)
{
	void __iomem *addr = plat_ioremap(offset, size, flags);

	if (addr)
		return addr;

#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute, therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_addr_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)
				(unsigned long)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}

/*
 * ioremap_prot - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_prot gives the caller control over cache coherency attributes (CCA)
 */
static inline void __iomem *ioremap_prot(phys_addr_t offset,
		unsigned long size, unsigned long prot_val)
{
	return __ioremap_mode(offset, size, prot_val & _CACHE_MASK);
}

/*
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
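
/*
 * Example usage, for illustration only (the base address and register
 * offset below are hypothetical, not taken from any real board):
 *
 *	void __iomem *regs = ioremap(0x1f010000, 0x100);
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x08);
 *	writel(status | 0x1, regs + 0x08);
 *	iounmap(regs);
 */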

/*
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache

/*
 * ioremap_cachable - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cacheable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)
#define ioremap_cache ioremap_cachable

| 273 | /* |
Serge Semin | 9748e33 | 2018-07-09 16:57:12 +0300 | [diff] [blame] | 274 | * ioremap_wc - map bus memory into CPU space |
| 275 | * @offset: bus address of the memory |
| 276 | * @size: size of the resource to map |
| 277 | * |
| 278 | * ioremap_wc performs a platform specific sequence of operations to |
| 279 | * make bus memory CPU accessible via the readb/readw/readl/writeb/ |
| 280 | * writew/writel functions and the other mmio helpers. The returned |
| 281 | * address is not guaranteed to be usable directly as a virtual |
| 282 | * address. |
| 283 | * |
| 284 | * This version of ioremap ensures that the memory is marked uncachable |
| 285 | * but accelerated by means of write-combining feature. It is specifically |
| 286 | * useful for PCIe prefetchable windows, which may vastly improve a |
| 287 | * communications performance. If it was determined on boot stage, what |
| 288 | * CPU CCA doesn't support UCA, the method shall fall-back to the |
| 289 | * _CACHE_UNCACHED option (see cpu_probe() method). |
| 290 | */ |
| 291 | #define ioremap_wc(offset, size) \ |
| 292 | __ioremap_mode((offset), (size), boot_cpu_data.writecombine) |
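
/*
 * Example, for illustration only (the BAR index is hypothetical): a PCI(e)
 * driver could map a prefetchable memory window write-combined, falling
 * back transparently to an uncached mapping where the CPU lacks UCA:
 *
 *	void __iomem *fb = ioremap_wc(pci_resource_start(pdev, 2),
 *				      pci_resource_len(pdev, 2));
 *
 *	if (!fb)
 *		return -ENOMEM;
 *	memset_io(fb, 0, pci_resource_len(pdev, 2));
 */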

static inline void iounmap(const volatile void __iomem *addr)
{
	if (plat_iounmap(addr))
		return;

#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}

#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON3)
#define war_io_reorder_wmb()		wmb()
#else
#define war_io_reorder_wmb()		barrier()
#endif

#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq)	\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set push"		"\t\t# __writeq""\n\t"	\
			".set arch=r4000"	"\n\t"			\
			"dsll32 %L0, %L0, 0"	"\n\t"			\
			"dsrl32 %L0, %L0, 0"	"\n\t"			\
			"dsll32 %M0, %M0, 0"	"\n\t"			\
			"or	%L0, %L0, %M0"	"\n\t"			\
			"sd	%L0, %2"	"\n\t"			\
			".set pop"		"\n"			\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set push"		"\t\t# __readq" "\n\t"	\
			".set arch=r4000"	"\n\t"			\
			"ld	%L0, %1"	"\n\t"			\
			"dsra32 %M0, %L0, 0"	"\n\t"			\
			"sll	%L0, %L0, 0"	"\n\t"			\
			".set pop"		"\n"			\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__mem, __val);				\
}

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p)	\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	if (barrier)							\
		iobarrier_rw();						\
	else								\
		war_io_reorder_wmb();					\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)		\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	if (barrier)							\
		iobarrier_rw();						\
									\
	__val = *__addr;						\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	if (!relax)							\
		rmb();							\
	return pfx##ioswab##bwlq(__addr, __val);			\
}
| 438 | |
Maciej W. Rozycki | 8b65625 | 2018-10-08 01:37:23 +0100 | [diff] [blame] | 439 | #define __BUILD_MEMORY_PFX(bus, bwlq, type, relax) \ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 440 | \ |
Maciej W. Rozycki | 8b65625 | 2018-10-08 01:37:23 +0100 | [diff] [blame] | 441 | __BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 442 | |
Ralf Baechle | 9d58f30 | 2005-09-23 20:02:38 +0000 | [diff] [blame] | 443 | #define BUILDIO_MEM(bwlq, type) \ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 444 | \ |
Maciej W. Rozycki | 8b65625 | 2018-10-08 01:37:23 +0100 | [diff] [blame] | 445 | __BUILD_MEMORY_PFX(__raw_, bwlq, type, 0) \ |
| 446 | __BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1) \ |
| 447 | __BUILD_MEMORY_PFX(__mem_, bwlq, type, 0) \ |
| 448 | __BUILD_MEMORY_PFX(, bwlq, type, 0) |
Ralf Baechle | 9d58f30 | 2005-09-23 20:02:38 +0000 | [diff] [blame] | 449 | |
| 450 | BUILDIO_MEM(b, u8) |
| 451 | BUILDIO_MEM(w, u16) |
| 452 | BUILDIO_MEM(l, u32) |
Serge Semin | 1e27914 | 2019-06-14 09:33:42 +0300 | [diff] [blame] | 453 | #ifdef CONFIG_64BIT |
Ralf Baechle | 9d58f30 | 2005-09-23 20:02:38 +0000 | [diff] [blame] | 454 | BUILDIO_MEM(q, u64) |
Serge Semin | 1e27914 | 2019-06-14 09:33:42 +0300 | [diff] [blame] | 455 | #else |
| 456 | __BUILD_MEMORY_PFX(__raw_, q, u64, 0) |
| 457 | __BUILD_MEMORY_PFX(__mem_, q, u64, 0) |
| 458 | #endif |
Ralf Baechle | 9d58f30 | 2005-09-23 20:02:38 +0000 | [diff] [blame] | 459 | |
| 460 | #define __BUILD_IOPORT_PFX(bus, bwlq, type) \ |
Maciej W. Rozycki | 8b65625 | 2018-10-08 01:37:23 +0100 | [diff] [blame] | 461 | __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,) \ |
| 462 | __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p) |
Ralf Baechle | 9d58f30 | 2005-09-23 20:02:38 +0000 | [diff] [blame] | 463 | |
| 464 | #define BUILDIO_IOPORT(bwlq, type) \ |
| 465 | __BUILD_IOPORT_PFX(, bwlq, type) \ |
Al Viro | 290f10a | 2005-12-07 23:12:54 -0500 | [diff] [blame] | 466 | __BUILD_IOPORT_PFX(__mem_, bwlq, type) |
Ralf Baechle | 9d58f30 | 2005-09-23 20:02:38 +0000 | [diff] [blame] | 467 | |
| 468 | BUILDIO_IOPORT(b, u8) |
| 469 | BUILDIO_IOPORT(w, u16) |
| 470 | BUILDIO_IOPORT(l, u32) |
| 471 | #ifdef CONFIG_64BIT |
| 472 | BUILDIO_IOPORT(q, u64) |
| 473 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 474 | |
| 475 | #define __BUILDIO(bwlq, type) \ |
| 476 | \ |
Maciej W. Rozycki | 8b65625 | 2018-10-08 01:37:23 +0100 | [diff] [blame] | 477 | __BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 478 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 479 | __BUILDIO(q, u64) |
| 480 | |
#define readb_relaxed			__relaxed_readb
#define readw_relaxed			__relaxed_readw
#define readl_relaxed			__relaxed_readl
#ifdef CONFIG_64BIT
#define readq_relaxed			__relaxed_readq
#endif

#define writeb_relaxed			__relaxed_writeb
#define writew_relaxed			__relaxed_writew
#define writel_relaxed			__relaxed_writel
#ifdef CONFIG_64BIT
#define writeq_relaxed			__relaxed_writeq
#endif
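
/*
 * Illustrative sketch (STATUS, LENGTH, DONE_BIT and "regs" are made-up
 * names): the *_relaxed accessors omit the barrier that orders an MMIO
 * read against later accesses to coherent DMA memory, which makes them
 * suitable for tight polling loops; the final, ordered readl() restores
 * that ordering before the DMA buffer is touched:
 *
 *	while (!(readl_relaxed(regs + STATUS) & DONE_BIT))
 *		cpu_relax();
 *	len = readl(regs + LENGTH);
 */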

#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
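
/*
 * Illustrative sketch (the CTRL offset and enable bit are made up): these
 * helpers access registers that are defined as big-endian regardless of
 * the CPU's endianness, bypassing the usual port mangling:
 *
 *	u32 ctrl = readl_be(regs + CTRL);
 *
 *	writel_be(ctrl | CTRL_ENABLE, regs + CTRL);
 */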

/*
 * Some code tests for these symbols
 */
#ifdef CONFIG_64BIT
#define readq				readq
#define writeq				writeq
#endif

#define __BUILD_MEMORY_STRING(bwlq, type)				\
									\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}

#define BUILDSTRING(bwlq, type)						\
									\
	__BUILD_MEMORY_STRING(bwlq, type)				\
	__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
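
/*
 * Illustrative sketch (FIFO_PORT, DATA_REG and the buffer size are made
 * up): the string accessors repeatedly access a single port or MMIO
 * location to transfer a whole buffer:
 *
 *	u16 buf[256];
 *
 *	insw(FIFO_PORT, buf, ARRAY_SIZE(buf));
 *	writesl(regs + DATA_REG, buf, ARRAY_SIZE(buf) / 2);
 */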

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}

static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}

static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}
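
/*
 * Illustrative sketch (the SRAM window size and firmware blob are
 * hypothetical): these helpers bulk-copy between ordinary memory and an
 * ioremap()ed region:
 *
 *	memset_io(sram, 0, SRAM_SIZE);
 *	memcpy_toio(sram, fw->data, fw->size);
 */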

/*
 * The caches on some architectures aren't dma-coherent, so this has to
 * be handled in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches and memory coherent by
 *    writing the content of the caches back to memory, if necessary,
 *    before DMA transfers from memory to outside.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 *
 * This API used to be exported; it now is for arch code internal use only.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)

#else /* Sane hardware */

#define dma_cache_wback_inv(start,size) \
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */
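
/*
 * Illustrative sketch of how arch-internal DMA mapping code uses these
 * hooks (direction handling simplified; not a complete implementation):
 *
 *	if (dir == DMA_FROM_DEVICE)
 *		dma_cache_inv(addr, size);
 *	else
 *		dma_cache_wback(addr, size);
 */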

/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
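
/*
 * Illustrative sketch (the CSR address is hypothetical): the helpers
 * adjust the address by 4 on big-endian kernels so the 32-bit access
 * hits the half of the 64-bit bus word that actually carries the data:
 *
 *	u32 v = csr_in32(0xbf000010);
 *
 *	csr_out32(v | 0x1, 0xbf000010);
 */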

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

void __ioread64_copy(void *to, const void __iomem *from, size_t count);

#endif /* _ASM_IO_H */