/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
 * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 * IP32 changes by Ilya.
 * Copyright (C) 2010 Cavium Networks, Inc.
 */
#include <linux/dma-direct.h>
#include <linux/memblock.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>

#include <asm/bootinfo.h>

#include <asm/octeon/octeon.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#include <asm/octeon/pci-octeon.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-pci-defs.h>

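/*
 * Per-BAR-configuration hooks for converting between CPU physical
 * addresses and PCI/PCIe bus (DMA) addresses.
 */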
struct octeon_dma_map_ops {
	dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
};

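/*
 * The PCIe BAR1 window at CVMX_PCIE_BAR1_PHYS_BASE is seen by the root
 * complex at CVMX_PCIE_BAR1_RC_BASE; these helpers shift addresses
 * between the two views and pass everything else through unchanged.
 */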
static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
{
	if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
		return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
	else
		return paddr;
}

static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
{
	if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
		return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
	else
		return daddr;
}

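/*
 * First generation PCIe: memory in the 0x410000000-0x420000000 range maps
 * low for PCI, so fold it down by 0x400000000 (and back up on the return
 * path) in addition to the BAR1 hole fixup.
 */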
static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;
	return octeon_hole_phys_to_dma(paddr);
}

static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	daddr = octeon_hole_dma_to_phys(daddr);

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;

	return daddr;
}

static const struct octeon_dma_map_ops octeon_gen1_ops = {
	.phys_to_dma = octeon_gen1_phys_to_dma,
	.dma_to_phys = octeon_gen1_dma_to_phys,
};

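/* Second generation PCIe: only the BAR1 hole fixup is needed. */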
static dma_addr_t octeon_gen2_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return octeon_hole_phys_to_dma(paddr);
}

static phys_addr_t octeon_gen2_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return octeon_hole_dma_to_phys(daddr);
}

static const struct octeon_dma_map_ops octeon_gen2_ops = {
	.phys_to_dma = octeon_gen2_phys_to_dma,
	.dma_to_phys = octeon_gen2_dma_to_phys,
};

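/*
 * "Big" BAR configuration: addresses below the BAR1 hole map 1:1;
 * everything at 0xf0000000 and above is offset into BAR2 at
 * OCTEON_BAR2_PCI_ADDRESS (after folding the high alias down).
 */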
static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;

	/* Anything in the BAR1 hole or above goes via BAR2 */
	if (paddr >= 0xf0000000ull)
		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;

	return paddr;
}

static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
		daddr -= OCTEON_BAR2_PCI_ADDRESS;

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;
	return daddr;
}

static const struct octeon_dma_map_ops octeon_big_ops = {
	.phys_to_dma = octeon_big_phys_to_dma,
	.dma_to_phys = octeon_big_dma_to_phys,
};

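/*
 * "Small" BAR configuration: only the 128MB window starting at
 * octeon_bar1_pci_phys is reachable through BAR1; everything else is
 * offset into BAR2 at OCTEON_BAR2_PCI_ADDRESS.
 */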
static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
					   phys_addr_t paddr)
{
	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
		paddr -= 0x400000000ull;

	/* Anything not in the BAR1 range goes via BAR2 */
	if (paddr >= octeon_bar1_pci_phys && paddr < octeon_bar1_pci_phys + 0x8000000ull)
		paddr = paddr - octeon_bar1_pci_phys;
	else
		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;

	return paddr;
}

static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
					    dma_addr_t daddr)
{
	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
		daddr -= OCTEON_BAR2_PCI_ADDRESS;
	else
		daddr += octeon_bar1_pci_phys;

	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
		daddr += 0x400000000ull;
	return daddr;
}

static const struct octeon_dma_map_ops octeon_small_ops = {
	.phys_to_dma = octeon_small_phys_to_dma,
	.dma_to_phys = octeon_small_dma_to_phys,
};

static const struct octeon_dma_map_ops *octeon_pci_dma_ops;

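/* Pick the translation helpers that match the configured BAR layout. */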
void __init octeon_pci_dma_init(void)
{
	switch (octeon_dma_bar_type) {
	case OCTEON_DMA_BAR_TYPE_PCIE:
		octeon_pci_dma_ops = &octeon_gen1_ops;
		break;
	case OCTEON_DMA_BAR_TYPE_PCIE2:
		octeon_pci_dma_ops = &octeon_gen2_ops;
		break;
	case OCTEON_DMA_BAR_TYPE_BIG:
		octeon_pci_dma_ops = &octeon_big_ops;
		break;
	case OCTEON_DMA_BAR_TYPE_SMALL:
		octeon_pci_dma_ops = &octeon_small_ops;
		break;
	default:
		BUG();
	}
}
#endif /* CONFIG_PCI */

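/*
 * dma-direct hooks: PCI devices go through the translation selected
 * above, all other devices use an identity mapping.
 */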
dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
#ifdef CONFIG_PCI
	if (dev && dev_is_pci(dev))
		return octeon_pci_dma_ops->phys_to_dma(dev, paddr);
#endif
	return paddr;
}

phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
{
#ifdef CONFIG_PCI
	if (dev && dev_is_pci(dev))
		return octeon_pci_dma_ops->dma_to_phys(dev, daddr);
#endif
	return daddr;
}

char *octeon_swiotlb;

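/*
 * Reserve a SWIOTLB bounce buffer for devices that cannot reach all of
 * memory, sized according to the installed memory and BAR configuration.
 */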
void __init plat_swiotlb_setup(void)
{
	struct memblock_region *mem;
	phys_addr_t max_addr;
	phys_addr_t addr_size;
	size_t swiotlbsize;
	unsigned long swiotlb_nslabs;

	max_addr = 0;
	addr_size = 0;

	for_each_memblock(memory, mem) {
		/* These addresses map low for PCI. */
		if (mem->base > 0x410000000ull && !OCTEON_IS_OCTEON2())
			continue;

		addr_size += mem->size;

		if (max_addr < mem->base + mem->size)
			max_addr = mem->base + mem->size;
	}

	swiotlbsize = PAGE_SIZE;

#ifdef CONFIG_PCI
	/*
	 * For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 memory
	 * size to a maximum of 64MB
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN31XX)
	    || OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
		swiotlbsize = addr_size / 4;
		if (swiotlbsize > 64 * (1<<20))
			swiotlbsize = 64 * (1<<20);
	} else if (max_addr > 0xf0000000ul) {
		/*
		 * Otherwise only allocate a big iotlb if there is
		 * memory past the BAR1 hole.
		 */
		swiotlbsize = 64 * (1<<20);
	}
#endif
#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
	/* OCTEON II ohci is only 32-bit. */
	if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
		swiotlbsize = 64 * (1<<20);
#endif
	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
	swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
	swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;

	octeon_swiotlb = memblock_alloc_low(swiotlbsize, PAGE_SIZE);
	if (!octeon_swiotlb)
		panic("%s: Failed to allocate %zu bytes align=%lx\n",
		      __func__, swiotlbsize, PAGE_SIZE);

	if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
		panic("Cannot allocate SWIOTLB buffer");
}