// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

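/*
 * Illustrative sketch (not part of this file): an IOMMU driver would
 * typically pair iommu_get_dma_cookie() with iommu_put_dma_cookie() in its
 * domain_alloc/domain_free callbacks. "foo_domain" below is a made-up name
 * used only to show the intended call pattern:
 *
 *	static struct iommu_domain *foo_domain_alloc(unsigned int type)
 *	{
 *		struct foo_domain *fd = kzalloc(sizeof(*fd), GFP_KERNEL);
 *
 *		if (!fd)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&fd->domain)) {
 *			kfree(fd);
 *			return NULL;
 *		}
 *		return &fd->domain;
 *	}
 *
 *	static void foo_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(container_of(domain, struct foo_domain, domain));
 *	}
 */
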
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

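/*
 * Illustrative sketch (not part of this file): a caller that manages its own
 * IOVA space on an IOMMU_DOMAIN_UNMANAGED domain could set aside a region for
 * MSI doorbells and hand its base to this helper, roughly:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *	// MSI_DOORBELL_IOVA_BASE is a hypothetical, caller-chosen base of a
 *	// reserved region large enough for all attached devices' doorbells.
 *	if (domain && iommu_get_msi_cookie(domain, MSI_DOORBELL_IOVA_BASE))
 *		pr_err("failed to set up MSI remapping cookie\n");
 */
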
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

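/*
 * Illustrative sketch (not from this file): an IOMMU driver's
 * .get_resv_regions callback would typically add its own device-specific
 * regions and then chain into this helper, roughly:
 *
 *	static void foo_get_resv_regions(struct device *dev,
 *					 struct list_head *head)
 *	{
 *		... add foo-specific reserved regions to head ...
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 *
 * ("foo_get_resv_regions" is a made-up name for illustration only.)
 */
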
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

static int iommu_dma_deferred_attach(struct device *dev,
		struct iommu_domain *domain)
{
	const struct iommu_ops *ops = domain->ops;

	if (!is_kdump_kernel())
		return 0;

	if (unlikely(ops->is_attach_deferred &&
			ops->is_attach_deferred(domain, dev)))
		return iommu_attach_device(domain, dev);

	return 0;
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
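
/*
 * For example, given the mapping above, a cache-coherent device performing a
 * DMA_TO_DEVICE transfer with no special attributes ends up with
 * dma_info_to_prot(DMA_TO_DEVICE, true, 0) == (IOMMU_CACHE | IOMMU_READ),
 * i.e. a read-only, cacheable IOMMU mapping.
 */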

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_tlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

| 824 | /* |
| 825 | * The DMA API client is passing in a scatterlist which could describe |
| 826 | * any old buffer layout, but the IOMMU API requires everything to be |
| 827 | * aligned to IOMMU pages. Hence the need for this complicated bit of |
| 828 | * impedance-matching, to be able to hand off a suitably-aligned list, |
| 829 | * but still preserve the original offsets and sizes for the caller. |
| 830 | */ |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 831 | static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, |
| 832 | int nents, enum dma_data_direction dir, unsigned long attrs) |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 833 | { |
Robin Murphy | 43c5bf1 | 2018-09-12 16:24:13 +0100 | [diff] [blame] | 834 | struct iommu_domain *domain = iommu_get_dma_domain(dev); |
Robin Murphy | 842fe51 | 2017-03-31 15:46:05 +0100 | [diff] [blame] | 835 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
| 836 | struct iova_domain *iovad = &cookie->iovad; |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 837 | struct scatterlist *s, *prev = NULL; |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 838 | int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs); |
Robin Murphy | 842fe51 | 2017-03-31 15:46:05 +0100 | [diff] [blame] | 839 | dma_addr_t iova; |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 840 | size_t iova_len = 0; |
Robin Murphy | 809eac5 | 2016-04-11 12:32:31 +0100 | [diff] [blame] | 841 | unsigned long mask = dma_get_seg_boundary(dev); |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 842 | int i; |
| 843 | |
Tom Murphy | 795bbbb | 2019-09-08 09:56:39 -0700 | [diff] [blame] | 844 | if (unlikely(iommu_dma_deferred_attach(dev, domain))) |
| 845 | return 0; |
| 846 | |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 847 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) |
| 848 | iommu_dma_sync_sg_for_device(dev, sg, nents, dir); |
| 849 | |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 850 | /* |
| 851 | * Work out how much IOVA space we need, and align the segments to |
| 852 | * IOVA granules for the IOMMU driver to handle. With some clever |
| 853 | * trickery we can modify the list in-place, but reversibly, by |
Robin Murphy | 809eac5 | 2016-04-11 12:32:31 +0100 | [diff] [blame] | 854 | * stashing the unaligned parts in the as-yet-unused DMA fields. |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 855 | */ |
| 856 | for_each_sg(sg, s, nents, i) { |
Robin Murphy | 809eac5 | 2016-04-11 12:32:31 +0100 | [diff] [blame] | 857 | size_t s_iova_off = iova_offset(iovad, s->offset); |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 858 | size_t s_length = s->length; |
Robin Murphy | 809eac5 | 2016-04-11 12:32:31 +0100 | [diff] [blame] | 859 | size_t pad_len = (mask - iova_len + 1) & mask; |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 860 | |
Robin Murphy | 809eac5 | 2016-04-11 12:32:31 +0100 | [diff] [blame] | 861 | sg_dma_address(s) = s_iova_off; |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 862 | sg_dma_len(s) = s_length; |
Robin Murphy | 809eac5 | 2016-04-11 12:32:31 +0100 | [diff] [blame] | 863 | s->offset -= s_iova_off; |
| 864 | s_length = iova_align(iovad, s_length + s_iova_off); |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 865 | s->length = s_length; |
| 866 | |
| 867 | /* |
Robin Murphy | 809eac5 | 2016-04-11 12:32:31 +0100 | [diff] [blame] | 868 | * Due to the alignment of our single IOVA allocation, we can |
| 869 | * depend on these assumptions about the segment boundary mask: |
| 870 | * - If mask size >= IOVA size, then the IOVA range cannot |
| 871 | * possibly fall across a boundary, so we don't care. |
| 872 | * - If mask size < IOVA size, then the IOVA range must start |
| 873 | * exactly on a boundary, therefore we can lay things out |
| 874 | * based purely on segment lengths without needing to know |
| 875 | * the actual addresses beforehand. |
| 876 | * - The mask must be a power of 2, so pad_len == 0 if |
| 877 | * iova_len == 0, thus we cannot dereference prev the first |
| 878 | * time through here (i.e. before it has a meaningful value). |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 879 | */ |
Robin Murphy | 809eac5 | 2016-04-11 12:32:31 +0100 | [diff] [blame] | 880 | if (pad_len && pad_len < s_length - 1) { |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 881 | prev->length += pad_len; |
| 882 | iova_len += pad_len; |
| 883 | } |
| 884 | |
| 885 | iova_len += s_length; |
| 886 | prev = s; |
| 887 | } |
| 888 | |
Robin Murphy | 842fe51 | 2017-03-31 15:46:05 +0100 | [diff] [blame] | 889 | iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev); |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 890 | if (!iova) |
| 891 | goto out_restore_sg; |
| 892 | |
| 893 | /* |
| 894 | * We'll leave any physical concatenation to the IOMMU driver's |
| 895 | * implementation - it knows better than we do. |
| 896 | */ |
Tom Murphy | 781ca2d | 2019-09-08 09:56:38 -0700 | [diff] [blame] | 897 | if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len) |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 898 | goto out_free_iova; |
| 899 | |
Robin Murphy | 842fe51 | 2017-03-31 15:46:05 +0100 | [diff] [blame] | 900 | return __finalise_sg(dev, sg, nents, iova); |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 901 | |
| 902 | out_free_iova: |
Robin Murphy | 842fe51 | 2017-03-31 15:46:05 +0100 | [diff] [blame] | 903 | iommu_dma_free_iova(cookie, iova, iova_len); |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 904 | out_restore_sg: |
| 905 | __invalidate_sg(sg, nents); |
| 906 | return 0; |
| 907 | } |
| 908 | |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 909 | static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, |
| 910 | int nents, enum dma_data_direction dir, unsigned long attrs) |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 911 | { |
Robin Murphy | 842fe51 | 2017-03-31 15:46:05 +0100 | [diff] [blame] | 912 | dma_addr_t start, end; |
| 913 | struct scatterlist *tmp; |
| 914 | int i; |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 915 | |
Nathan Chancellor | 1b961423 | 2019-05-29 01:15:32 -0700 | [diff] [blame] | 916 | if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 917 | iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir); |
| 918 | |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 919 | /* |
| 920 | * The scatterlist segments are mapped into a single |
| 921 | * contiguous IOVA allocation, so this is incredibly easy. |
| 922 | */ |
Robin Murphy | 842fe51 | 2017-03-31 15:46:05 +0100 | [diff] [blame] | 923 | start = sg_dma_address(sg); |
| 924 | for_each_sg(sg_next(sg), tmp, nents - 1, i) { |
| 925 | if (sg_dma_len(tmp) == 0) |
| 926 | break; |
| 927 | sg = tmp; |
| 928 | } |
| 929 | end = sg_dma_address(sg) + sg_dma_len(sg); |
Robin Murphy | b61d271 | 2019-05-20 09:29:31 +0200 | [diff] [blame] | 930 | __iommu_dma_unmap(dev, start, end - start); |
Robin Murphy | 0db2e5d | 2015-10-01 20:13:58 +0100 | [diff] [blame] | 931 | } |
| 932 | |
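/*
 * Map a physical resource (e.g. device MMIO) rather than RAM: the mapping
 * is created with IOMMU_MMIO and, since there is no kernel memory backing
 * it, no CPU cache maintenance is performed.
 */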
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 933 | static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, |
Robin Murphy | 51f8cc9 | 2016-11-14 12:16:26 +0000 | [diff] [blame] | 934 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
| 935 | { |
| 936 | return __iommu_dma_map(dev, phys, size, |
Tom Murphy | 6e23502 | 2019-09-08 09:56:40 -0700 | [diff] [blame] | 937 | dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO, |
| 938 | dma_get_mask(dev)); |
Robin Murphy | 51f8cc9 | 2016-11-14 12:16:26 +0000 | [diff] [blame] | 939 | } |
| 940 | |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 941 | static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, |
Robin Murphy | 51f8cc9 | 2016-11-14 12:16:26 +0000 | [diff] [blame] | 942 | size_t size, enum dma_data_direction dir, unsigned long attrs) |
| 943 | { |
Robin Murphy | b61d271 | 2019-05-20 09:29:31 +0200 | [diff] [blame] | 944 | __iommu_dma_unmap(dev, handle, size); |
Robin Murphy | 51f8cc9 | 2016-11-14 12:16:26 +0000 | [diff] [blame] | 945 | } |
| 946 | |
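/*
 * Free the CPU side of a buffer from iommu_dma_alloc(); the IOVA mapping,
 * if any, must already have been removed by the caller (see
 * iommu_dma_free()).  Handles all three allocation flavours: atomic pool,
 * remapped (vmalloc'ed) memory, and plain lowmem/CMA pages.
 */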
Robin Murphy | 8553f6e | 2019-05-20 09:29:40 +0200 | [diff] [blame] | 947 | static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr) |
Robin Murphy | bcf4b9c | 2019-05-20 09:29:36 +0200 | [diff] [blame] | 948 | { |
| 949 | size_t alloc_size = PAGE_ALIGN(size); |
| 950 | int count = alloc_size >> PAGE_SHIFT; |
| 951 | struct page *page = NULL, **pages = NULL; |
| 952 | |
Robin Murphy | bcf4b9c | 2019-05-20 09:29:36 +0200 | [diff] [blame] | 953 | /* Non-coherent atomic allocation? Easy */ |
Christoph Hellwig | e6475eb | 2019-05-20 09:29:45 +0200 | [diff] [blame] | 954 | if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && |
David Rientjes | c84dc6e | 2020-04-14 17:04:55 -0700 | [diff] [blame] | 955 | dma_free_from_pool(dev, cpu_addr, alloc_size)) |
Robin Murphy | bcf4b9c | 2019-05-20 09:29:36 +0200 | [diff] [blame] | 956 | return; |
| 957 | |
Christoph Hellwig | e6475eb | 2019-05-20 09:29:45 +0200 | [diff] [blame] | 958 | if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { |
Robin Murphy | bcf4b9c | 2019-05-20 09:29:36 +0200 | [diff] [blame] | 959 | /* |
 | 960 | * If the address is remapped, then it's either non-coherent
| 961 | * or highmem CMA, or an iommu_dma_alloc_remap() construction. |
| 962 | */ |
Christoph Hellwig | 5cf4537 | 2019-06-03 09:14:31 +0200 | [diff] [blame] | 963 | pages = dma_common_find_pages(cpu_addr); |
Robin Murphy | bcf4b9c | 2019-05-20 09:29:36 +0200 | [diff] [blame] | 964 | if (!pages) |
| 965 | page = vmalloc_to_page(cpu_addr); |
Christoph Hellwig | 5123174 | 2019-08-30 08:51:01 +0200 | [diff] [blame] | 966 | dma_common_free_remap(cpu_addr, alloc_size); |
Robin Murphy | bcf4b9c | 2019-05-20 09:29:36 +0200 | [diff] [blame] | 967 | } else { |
| 968 | /* Lowmem means a coherent atomic or CMA allocation */ |
| 969 | page = virt_to_page(cpu_addr); |
| 970 | } |
| 971 | |
| 972 | if (pages) |
| 973 | __iommu_dma_free_pages(pages, count); |
Nicolin Chen | 591fcf3 | 2019-06-03 15:52:59 -0700 | [diff] [blame] | 974 | if (page) |
| 975 | dma_free_contiguous(dev, page, alloc_size); |
Robin Murphy | bcf4b9c | 2019-05-20 09:29:36 +0200 | [diff] [blame] | 976 | } |
| 977 | |
Robin Murphy | 8553f6e | 2019-05-20 09:29:40 +0200 | [diff] [blame] | 978 | static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, |
| 979 | dma_addr_t handle, unsigned long attrs) |
| 980 | { |
| 981 | __iommu_dma_unmap(dev, handle, size); |
| 982 | __iommu_dma_free(dev, size, cpu_addr); |
| 983 | } |
| 984 | |
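/*
 * Allocate a physically contiguous buffer, preferring CMA via
 * dma_alloc_contiguous() and falling back to alloc_pages_node().  When
 * CONFIG_DMA_REMAP is enabled and the device is non-coherent or the pages
 * are highmem, the buffer is remapped with the appropriate pgprot; it is
 * zeroed before being returned.
 */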
Christoph Hellwig | ee1ef05 | 2019-05-20 09:29:42 +0200 | [diff] [blame] | 985 | static void *iommu_dma_alloc_pages(struct device *dev, size_t size, |
| 986 | struct page **pagep, gfp_t gfp, unsigned long attrs) |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 987 | { |
| 988 | bool coherent = dev_is_dma_coherent(dev); |
Robin Murphy | 9ad5d6e | 2019-05-20 09:29:41 +0200 | [diff] [blame] | 989 | size_t alloc_size = PAGE_ALIGN(size); |
Christoph Hellwig | 90ae409 | 2019-08-20 11:45:49 +0900 | [diff] [blame] | 990 | int node = dev_to_node(dev); |
Christoph Hellwig | 9a4ab94 | 2019-05-20 09:29:39 +0200 | [diff] [blame] | 991 | struct page *page = NULL; |
Robin Murphy | 9ad5d6e | 2019-05-20 09:29:41 +0200 | [diff] [blame] | 992 | void *cpu_addr; |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 993 | |
Nicolin Chen | 591fcf3 | 2019-06-03 15:52:59 -0700 | [diff] [blame] | 994 | page = dma_alloc_contiguous(dev, alloc_size, gfp); |
Robin Murphy | 072bebc | 2019-05-20 09:29:37 +0200 | [diff] [blame] | 995 | if (!page) |
Christoph Hellwig | 90ae409 | 2019-08-20 11:45:49 +0900 | [diff] [blame] | 996 | page = alloc_pages_node(node, gfp, get_order(alloc_size)); |
| 997 | if (!page) |
Robin Murphy | 072bebc | 2019-05-20 09:29:37 +0200 | [diff] [blame] | 998 | return NULL; |
| 999 | |
Christoph Hellwig | e6475eb | 2019-05-20 09:29:45 +0200 | [diff] [blame] | 1000 | if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { |
Christoph Hellwig | 33dcb37 | 2019-07-26 09:26:40 +0200 | [diff] [blame] | 1001 | pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); |
Robin Murphy | 072bebc | 2019-05-20 09:29:37 +0200 | [diff] [blame] | 1002 | |
Robin Murphy | 9ad5d6e | 2019-05-20 09:29:41 +0200 | [diff] [blame] | 1003 | cpu_addr = dma_common_contiguous_remap(page, alloc_size, |
Christoph Hellwig | 5123174 | 2019-08-30 08:51:01 +0200 | [diff] [blame] | 1004 | prot, __builtin_return_address(0)); |
Robin Murphy | 9ad5d6e | 2019-05-20 09:29:41 +0200 | [diff] [blame] | 1005 | if (!cpu_addr) |
Christoph Hellwig | ee1ef05 | 2019-05-20 09:29:42 +0200 | [diff] [blame] | 1006 | goto out_free_pages; |
Robin Murphy | 8680aa5 | 2019-05-20 09:29:38 +0200 | [diff] [blame] | 1007 | |
| 1008 | if (!coherent) |
Robin Murphy | 9ad5d6e | 2019-05-20 09:29:41 +0200 | [diff] [blame] | 1009 | arch_dma_prep_coherent(page, size); |
Robin Murphy | 8680aa5 | 2019-05-20 09:29:38 +0200 | [diff] [blame] | 1010 | } else { |
Robin Murphy | 9ad5d6e | 2019-05-20 09:29:41 +0200 | [diff] [blame] | 1011 | cpu_addr = page_address(page); |
Robin Murphy | 8680aa5 | 2019-05-20 09:29:38 +0200 | [diff] [blame] | 1012 | } |
Christoph Hellwig | ee1ef05 | 2019-05-20 09:29:42 +0200 | [diff] [blame] | 1013 | |
| 1014 | *pagep = page; |
Robin Murphy | 9ad5d6e | 2019-05-20 09:29:41 +0200 | [diff] [blame] | 1015 | memset(cpu_addr, 0, alloc_size); |
| 1016 | return cpu_addr; |
Robin Murphy | 072bebc | 2019-05-20 09:29:37 +0200 | [diff] [blame] | 1017 | out_free_pages: |
Nicolin Chen | 591fcf3 | 2019-06-03 15:52:59 -0700 | [diff] [blame] | 1018 | dma_free_contiguous(dev, page, alloc_size); |
Robin Murphy | 072bebc | 2019-05-20 09:29:37 +0200 | [diff] [blame] | 1019 | return NULL; |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1020 | } |
| 1021 | |
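/*
 * Entry point for dma_alloc_attrs()/dma_alloc_coherent() once these ops
 * are installed.  Three paths: a remapped, possibly non-contiguous
 * allocation when the caller may block and has not asked for
 * DMA_ATTR_FORCE_CONTIGUOUS; the atomic pool for non-blocking,
 * non-coherent requests; otherwise a contiguous allocation via
 * iommu_dma_alloc_pages().  The result is then mapped through the IOMMU
 * with __iommu_dma_map().
 */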
Christoph Hellwig | ee1ef05 | 2019-05-20 09:29:42 +0200 | [diff] [blame] | 1022 | static void *iommu_dma_alloc(struct device *dev, size_t size, |
| 1023 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs) |
| 1024 | { |
| 1025 | bool coherent = dev_is_dma_coherent(dev); |
| 1026 | int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); |
| 1027 | struct page *page = NULL; |
| 1028 | void *cpu_addr; |
| 1029 | |
| 1030 | gfp |= __GFP_ZERO; |
| 1031 | |
Christoph Hellwig | e6475eb | 2019-05-20 09:29:45 +0200 | [diff] [blame] | 1032 | if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) && |
Christoph Hellwig | ee1ef05 | 2019-05-20 09:29:42 +0200 | [diff] [blame] | 1033 | !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) |
| 1034 | return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs); |
| 1035 | |
Christoph Hellwig | e6475eb | 2019-05-20 09:29:45 +0200 | [diff] [blame] | 1036 | if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && |
| 1037 | !gfpflags_allow_blocking(gfp) && !coherent) |
David Rientjes | c84dc6e | 2020-04-14 17:04:55 -0700 | [diff] [blame] | 1038 | cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, |
| 1039 | gfp); |
Christoph Hellwig | ee1ef05 | 2019-05-20 09:29:42 +0200 | [diff] [blame] | 1040 | else |
| 1041 | cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs); |
| 1042 | if (!cpu_addr) |
| 1043 | return NULL; |
| 1044 | |
Tom Murphy | 6e23502 | 2019-09-08 09:56:40 -0700 | [diff] [blame] | 1045 | *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot, |
| 1046 | dev->coherent_dma_mask); |
Christoph Hellwig | ee1ef05 | 2019-05-20 09:29:42 +0200 | [diff] [blame] | 1047 | if (*handle == DMA_MAPPING_ERROR) { |
| 1048 | __iommu_dma_free(dev, size, cpu_addr); |
| 1049 | return NULL; |
| 1050 | } |
| 1051 | |
| 1052 | return cpu_addr; |
| 1053 | } |
| 1054 | |
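/*
 * Implement userspace mmap() of a coherent buffer: remapped non-contiguous
 * allocations are mapped from their page array, everything else goes
 * through remap_pfn_range() on the contiguous region.
 */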
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1055 | static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
| 1056 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
| 1057 | unsigned long attrs) |
| 1058 | { |
| 1059 | unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; |
Christoph Hellwig | efd9f10 | 2019-05-20 09:29:44 +0200 | [diff] [blame] | 1060 | unsigned long pfn, off = vma->vm_pgoff; |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1061 | int ret; |
| 1062 | |
Christoph Hellwig | 33dcb37 | 2019-07-26 09:26:40 +0200 | [diff] [blame] | 1063 | vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1064 | |
| 1065 | if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) |
| 1066 | return ret; |
| 1067 | |
| 1068 | if (off >= nr_pages || vma_pages(vma) > nr_pages - off) |
| 1069 | return -ENXIO; |
| 1070 | |
Christoph Hellwig | e6475eb | 2019-05-20 09:29:45 +0200 | [diff] [blame] | 1071 | if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { |
Christoph Hellwig | 5cf4537 | 2019-06-03 09:14:31 +0200 | [diff] [blame] | 1072 | struct page **pages = dma_common_find_pages(cpu_addr); |
Christoph Hellwig | efd9f10 | 2019-05-20 09:29:44 +0200 | [diff] [blame] | 1073 | |
| 1074 | if (pages) |
| 1075 | return __iommu_dma_mmap(pages, size, vma); |
| 1076 | pfn = vmalloc_to_pfn(cpu_addr); |
| 1077 | } else { |
| 1078 | pfn = page_to_pfn(virt_to_page(cpu_addr)); |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1079 | } |
| 1080 | |
Christoph Hellwig | efd9f10 | 2019-05-20 09:29:44 +0200 | [diff] [blame] | 1081 | return remap_pfn_range(vma, vma->vm_start, pfn + off, |
| 1082 | vma->vm_end - vma->vm_start, |
| 1083 | vma->vm_page_prot); |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1084 | } |
| 1085 | |
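/*
 * Describe a coherent buffer as an sg_table: built from its page array for
 * a remapped non-contiguous allocation, or a single entry covering the
 * whole buffer when it is physically contiguous.
 */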
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1086 | static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, |
| 1087 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
| 1088 | unsigned long attrs) |
| 1089 | { |
Christoph Hellwig | 3fb3378 | 2019-05-20 09:29:43 +0200 | [diff] [blame] | 1090 | struct page *page; |
| 1091 | int ret; |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1092 | |
Christoph Hellwig | e6475eb | 2019-05-20 09:29:45 +0200 | [diff] [blame] | 1093 | if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { |
Christoph Hellwig | 5cf4537 | 2019-06-03 09:14:31 +0200 | [diff] [blame] | 1094 | struct page **pages = dma_common_find_pages(cpu_addr); |
Christoph Hellwig | 3fb3378 | 2019-05-20 09:29:43 +0200 | [diff] [blame] | 1095 | |
| 1096 | if (pages) { |
| 1097 | return sg_alloc_table_from_pages(sgt, pages, |
| 1098 | PAGE_ALIGN(size) >> PAGE_SHIFT, |
| 1099 | 0, size, GFP_KERNEL); |
| 1100 | } |
| 1101 | |
| 1102 | page = vmalloc_to_page(cpu_addr); |
| 1103 | } else { |
| 1104 | page = virt_to_page(cpu_addr); |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1105 | } |
| 1106 | |
Christoph Hellwig | 3fb3378 | 2019-05-20 09:29:43 +0200 | [diff] [blame] | 1107 | ret = sg_alloc_table(sgt, 1, GFP_KERNEL); |
| 1108 | if (!ret) |
| 1109 | sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); |
| 1110 | return ret; |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1111 | } |
| 1112 | |
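/*
 * The merge boundary is the smallest IOMMU page size minus one: segments
 * that line up modulo this value can be merged by callers such as the
 * block layer (e.g. a pgsize_bitmap whose lowest set bit is bit 12 gives
 * 0xfff).
 */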
Yoshihiro Shimoda | 158a6d3 | 2019-08-28 21:35:41 +0900 | [diff] [blame] | 1113 | static unsigned long iommu_dma_get_merge_boundary(struct device *dev) |
| 1114 | { |
| 1115 | struct iommu_domain *domain = iommu_get_dma_domain(dev); |
| 1116 | |
| 1117 | return (1UL << __ffs(domain->pgsize_bitmap)) - 1; |
| 1118 | } |
| 1119 | |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1120 | static const struct dma_map_ops iommu_dma_ops = { |
| 1121 | .alloc = iommu_dma_alloc, |
| 1122 | .free = iommu_dma_free, |
| 1123 | .mmap = iommu_dma_mmap, |
| 1124 | .get_sgtable = iommu_dma_get_sgtable, |
| 1125 | .map_page = iommu_dma_map_page, |
| 1126 | .unmap_page = iommu_dma_unmap_page, |
| 1127 | .map_sg = iommu_dma_map_sg, |
| 1128 | .unmap_sg = iommu_dma_unmap_sg, |
| 1129 | .sync_single_for_cpu = iommu_dma_sync_single_for_cpu, |
| 1130 | .sync_single_for_device = iommu_dma_sync_single_for_device, |
| 1131 | .sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu, |
| 1132 | .sync_sg_for_device = iommu_dma_sync_sg_for_device, |
| 1133 | .map_resource = iommu_dma_map_resource, |
| 1134 | .unmap_resource = iommu_dma_unmap_resource, |
Yoshihiro Shimoda | 158a6d3 | 2019-08-28 21:35:41 +0900 | [diff] [blame] | 1135 | .get_merge_boundary = iommu_dma_get_merge_boundary, |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1136 | }; |
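
/*
 * Illustrative only: once iommu_setup_dma_ops() below has installed these
 * ops for a device, an ordinary driver call such as
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * ends up in iommu_dma_map_page(), which allocates an IOVA and installs
 * the IOMMU mapping instead of handing back a physical address.
 */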
| 1137 | |
| 1138 | /* |
| 1139 | * The IOMMU core code allocates the default DMA domain, which the underlying |
| 1140 | * IOMMU driver needs to support via the dma-iommu layer. |
| 1141 | */ |
| 1142 | void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size) |
| 1143 | { |
| 1144 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
| 1145 | |
| 1146 | if (!domain) |
| 1147 | goto out_err; |
| 1148 | |
| 1149 | /* |
| 1150 | * The IOMMU core code allocates the default DMA domain, which the |
| 1151 | * underlying IOMMU driver needs to support via the dma-iommu layer. |
| 1152 | */ |
| 1153 | if (domain->type == IOMMU_DOMAIN_DMA) { |
| 1154 | if (iommu_dma_init_domain(domain, dma_base, size, dev)) |
| 1155 | goto out_err; |
| 1156 | dev->dma_ops = &iommu_dma_ops; |
| 1157 | } |
| 1158 | |
| 1159 | return; |
| 1160 | out_err: |
| 1161 | pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", |
| 1162 | dev_name(dev)); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1163 | } |
| 1164 | |
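/*
 * Find or create the mapping of an MSI doorbell within the device's IOMMU
 * domain: the doorbell's physical page gets an IOVA allocated and mapped
 * once, and is then cached on the cookie's msi_page_list for reuse.
 */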
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1165 | static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, |
| 1166 | phys_addr_t msi_addr, struct iommu_domain *domain) |
| 1167 | { |
| 1168 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
| 1169 | struct iommu_dma_msi_page *msi_page; |
Robin Murphy | 842fe51 | 2017-03-31 15:46:05 +0100 | [diff] [blame] | 1170 | dma_addr_t iova; |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1171 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; |
Robin Murphy | fdbe574 | 2017-01-19 20:57:46 +0000 | [diff] [blame] | 1172 | size_t size = cookie_msi_granule(cookie); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1173 | |
Robin Murphy | fdbe574 | 2017-01-19 20:57:46 +0000 | [diff] [blame] | 1174 | msi_addr &= ~(phys_addr_t)(size - 1); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1175 | list_for_each_entry(msi_page, &cookie->msi_page_list, list) |
| 1176 | if (msi_page->phys == msi_addr) |
| 1177 | return msi_page; |
| 1178 | |
Robin Murphy | c186479 | 2019-12-09 19:47:25 +0000 | [diff] [blame] | 1179 | msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1180 | if (!msi_page) |
| 1181 | return NULL; |
| 1182 | |
Robin Murphy | 8af23fa | 2019-07-29 16:32:38 +0100 | [diff] [blame] | 1183 | iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); |
| 1184 | if (!iova) |
Robin Murphy | a44e665 | 2017-03-31 15:46:06 +0100 | [diff] [blame] | 1185 | goto out_free_page; |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1186 | |
Robin Murphy | 8af23fa | 2019-07-29 16:32:38 +0100 | [diff] [blame] | 1187 | if (iommu_map(domain, iova, msi_addr, size, prot)) |
| 1188 | goto out_free_iova; |
| 1189 | |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1190 | INIT_LIST_HEAD(&msi_page->list); |
Robin Murphy | a44e665 | 2017-03-31 15:46:06 +0100 | [diff] [blame] | 1191 | msi_page->phys = msi_addr; |
| 1192 | msi_page->iova = iova; |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1193 | list_add(&msi_page->list, &cookie->msi_page_list); |
| 1194 | return msi_page; |
| 1195 | |
Robin Murphy | 8af23fa | 2019-07-29 16:32:38 +0100 | [diff] [blame] | 1196 | out_free_iova: |
| 1197 | iommu_dma_free_iova(cookie, iova, size); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1198 | out_free_page: |
| 1199 | kfree(msi_page); |
| 1200 | return NULL; |
| 1201 | } |
| 1202 | |
Julien Grall | ece6e6f | 2019-05-01 14:58:19 +0100 | [diff] [blame] | 1203 | int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1204 | { |
Julien Grall | ece6e6f | 2019-05-01 14:58:19 +0100 | [diff] [blame] | 1205 | struct device *dev = msi_desc_to_dev(desc); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1206 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1207 | struct iommu_dma_msi_page *msi_page; |
Robin Murphy | c186479 | 2019-12-09 19:47:25 +0000 | [diff] [blame] | 1208 | static DEFINE_MUTEX(msi_prepare_lock); /* see below */ |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1209 | |
Julien Grall | ece6e6f | 2019-05-01 14:58:19 +0100 | [diff] [blame] | 1210 | if (!domain || !domain->iova_cookie) { |
| 1211 | desc->iommu_cookie = NULL; |
| 1212 | return 0; |
| 1213 | } |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1214 | |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1215 | /* |
Robin Murphy | c186479 | 2019-12-09 19:47:25 +0000 | [diff] [blame] | 1216 | * In fact the whole prepare operation should already be serialised by |
| 1217 | * irq_domain_mutex further up the callchain, but that's pretty subtle |
| 1218 | * on its own, so consider this locking as failsafe documentation... |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1219 | */ |
Robin Murphy | c186479 | 2019-12-09 19:47:25 +0000 | [diff] [blame] | 1220 | mutex_lock(&msi_prepare_lock); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1221 | msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); |
Robin Murphy | c186479 | 2019-12-09 19:47:25 +0000 | [diff] [blame] | 1222 | mutex_unlock(&msi_prepare_lock); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1223 | |
Julien Grall | ece6e6f | 2019-05-01 14:58:19 +0100 | [diff] [blame] | 1224 | msi_desc_set_iommu_cookie(desc, msi_page); |
| 1225 | |
| 1226 | if (!msi_page) |
| 1227 | return -ENOMEM; |
| 1228 | return 0; |
| 1229 | } |
| 1230 | |
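/*
 * Rewrite a composed MSI message so that its address targets the IOVA at
 * which the doorbell was mapped above, preserving the offset within the
 * MSI granule, since the device's writes will be translated by the IOMMU.
 */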
| 1231 | void iommu_dma_compose_msi_msg(struct msi_desc *desc, |
| 1232 | struct msi_msg *msg) |
| 1233 | { |
| 1234 | struct device *dev = msi_desc_to_dev(desc); |
| 1235 | const struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
| 1236 | const struct iommu_dma_msi_page *msi_page; |
| 1237 | |
| 1238 | msi_page = msi_desc_get_iommu_cookie(desc); |
| 1239 | |
| 1240 | if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) |
| 1241 | return; |
| 1242 | |
| 1243 | msg->address_hi = upper_32_bits(msi_page->iova); |
| 1244 | msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; |
| 1245 | msg->address_lo += lower_32_bits(msi_page->iova); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1246 | } |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1247 | |
| 1248 | static int iommu_dma_init(void) |
| 1249 | { |
| 1250 | return iova_cache_get(); |
| 1251 | } |
| 1252 | arch_initcall(iommu_dma_init); |