// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
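
/*
 * Illustrative sketch (not from this file): a driver's domain_alloc()
 * callback would typically pair this with iommu_put_dma_cookie() in
 * domain_free(), along the lines of:
 *
 *	if (type == IOMMU_DOMAIN_DMA &&
 *	    iommu_get_dma_cookie(&my_domain->domain)) {
 *		kfree(my_domain);
 *		return NULL;
 *	}
 *
 * where "my_domain" is a hypothetical driver-private wrapper struct.
 */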

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
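
/*
 * Illustrative sketch (not from this file): a caller managing an unmanaged
 * domain might set aside an IOVA range and then do something like:
 *
 *	ret = iommu_get_msi_cookie(domain, resv_base);
 *
 * where "resv_base" is a hypothetical IOVA the caller has reserved for
 * MSI doorbell mappings, per the requirements documented above.
 */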

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

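/*
 * Pre-populate the cookie's msi_page_list with entries whose IOVA equals
 * their physical address, covering a hardware-translated MSI region, so
 * that later doorbell lookups in this range find a ready-made 1:1
 * "mapping" and nothing needs to be remapped.
 */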
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

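/*
 * Carve the host bridge's outbound MEM windows out of the IOVA space
 * (addresses aimed at those windows go to peers, not through the IOMMU),
 * then reserve everything *outside* the bridge's inbound dma_ranges
 * windows, which must be supplied in sorted order.
 */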
static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

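/*
 * Allocate a run of IOVA space. For an MSI cookie this is just a trivial
 * linear bump allocator; otherwise the length is rounded up to a power of
 * two of IOVA pages (so frees stay friendly to the range caches), the
 * limit is clamped by the bus mask and domain aperture, and PCI devices
 * first try for a 32-bit (SAC) address before using the full limit.
 */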
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

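/*
 * Tear down a mapping: expand to granule-aligned bounds, unmap, sync the
 * IOTLB immediately unless a flush queue will do so lazily, then give
 * the IOVA space back.
 */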
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

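/*
 * Fill an array with @count order-0 page pointers, opportunistically
 * grabbing the largest chunks that @order_mask permits and splitting
 * them: the buffer needn't be physically contiguous, but can still
 * benefit from larger IOMMU mappings where the allocator cooperates.
 */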
static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

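/*
 * The sync helpers below are no-ops for coherent devices; otherwise they
 * translate the DMA address back to a physical address (directly, or per
 * segment via sg_phys()) and hand it to the arch cache maintenance
 * routines.
 */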
static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(dev, phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

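/*
 * Page map/unmap: wrappers around __iommu_dma_map()/__iommu_dma_unmap()
 * which also perform the CPU cache maintenance a non-coherent device
 * needs, unless the caller opted out with DMA_ATTR_SKIP_CPU_SYNC.
 */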
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

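/*
 * Resource mapping is for physical ranges that are not page-backed RAM
 * (e.g. device MMIO for peer-to-peer DMA): map with IOMMU_MMIO and never
 * touch the CPU caches.
 */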
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

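/*
 * Work out how a buffer from one of the alloc paths was constructed
 * (atomic pool, remapped page array, remapped contiguous, or plain
 * lowmem contiguous) and release it accordingly.
 */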
Robin Murphy | 8553f6e | 2019-05-20 09:29:40 +0200 | [diff] [blame] | 916 | static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr) |
Robin Murphy | bcf4b9c | 2019-05-20 09:29:36 +0200 | [diff] [blame] | 917 | { |
| 918 | size_t alloc_size = PAGE_ALIGN(size); |
| 919 | int count = alloc_size >> PAGE_SHIFT; |
| 920 | struct page *page = NULL, **pages = NULL; |
| 921 | |
Robin Murphy | bcf4b9c | 2019-05-20 09:29:36 +0200 | [diff] [blame] | 922 | /* Non-coherent atomic allocation? Easy */ |
Christoph Hellwig | e6475eb | 2019-05-20 09:29:45 +0200 | [diff] [blame] | 923 | if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && |
| 924 | dma_free_from_pool(cpu_addr, alloc_size)) |
Robin Murphy | bcf4b9c | 2019-05-20 09:29:36 +0200 | [diff] [blame] | 925 | return; |
| 926 | |
Christoph Hellwig | e6475eb | 2019-05-20 09:29:45 +0200 | [diff] [blame] | 927 | if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { |
Robin Murphy | bcf4b9c | 2019-05-20 09:29:36 +0200 | [diff] [blame] | 928 | /* |
| 929 | * If the address is remapped, then it's either non-coherent
| 930 | * or highmem CMA, or an iommu_dma_alloc_remap() construction. |
| 931 | */ |
Christoph Hellwig | 5cf4537 | 2019-06-03 09:14:31 +0200 | [diff] [blame^] | 932 | pages = dma_common_find_pages(cpu_addr); |
Robin Murphy | bcf4b9c | 2019-05-20 09:29:36 +0200 | [diff] [blame] | 933 | if (!pages) |
| 934 | page = vmalloc_to_page(cpu_addr); |
Christoph Hellwig | 5123174 | 2019-08-30 08:51:01 +0200 | [diff] [blame] | 935 | dma_common_free_remap(cpu_addr, alloc_size); |
Robin Murphy | bcf4b9c | 2019-05-20 09:29:36 +0200 | [diff] [blame] | 936 | } else { |
| 937 | /* Lowmem means a coherent atomic or CMA allocation */ |
| 938 | page = virt_to_page(cpu_addr); |
| 939 | } |
| 940 | |
| 941 | if (pages) |
| 942 | __iommu_dma_free_pages(pages, count); |
Nicolin Chen | 591fcf3 | 2019-06-03 15:52:59 -0700 | [diff] [blame] | 943 | if (page) |
| 944 | dma_free_contiguous(dev, page, alloc_size); |
Robin Murphy | bcf4b9c | 2019-05-20 09:29:36 +0200 | [diff] [blame] | 945 | } |
| 946 | |
Robin Murphy | 8553f6e | 2019-05-20 09:29:40 +0200 | [diff] [blame] | 947 | static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, |
| 948 | dma_addr_t handle, unsigned long attrs) |
| 949 | { |
| 950 | __iommu_dma_unmap(dev, handle, size); |
| 951 | __iommu_dma_free(dev, size, cpu_addr); |
| 952 | } |
| 953 | |
Christoph Hellwig | ee1ef05 | 2019-05-20 09:29:42 +0200 | [diff] [blame] | 954 | static void *iommu_dma_alloc_pages(struct device *dev, size_t size, |
| 955 | struct page **pagep, gfp_t gfp, unsigned long attrs) |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 956 | { |
| 957 | bool coherent = dev_is_dma_coherent(dev); |
Robin Murphy | 9ad5d6e | 2019-05-20 09:29:41 +0200 | [diff] [blame] | 958 | size_t alloc_size = PAGE_ALIGN(size); |
Christoph Hellwig | 90ae409 | 2019-08-20 11:45:49 +0900 | [diff] [blame] | 959 | int node = dev_to_node(dev); |
Christoph Hellwig | 9a4ab94 | 2019-05-20 09:29:39 +0200 | [diff] [blame] | 960 | struct page *page = NULL; |
Robin Murphy | 9ad5d6e | 2019-05-20 09:29:41 +0200 | [diff] [blame] | 961 | void *cpu_addr; |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 962 | |
Nicolin Chen | 591fcf3 | 2019-06-03 15:52:59 -0700 | [diff] [blame] | 963 | page = dma_alloc_contiguous(dev, alloc_size, gfp); |
Robin Murphy | 072bebc | 2019-05-20 09:29:37 +0200 | [diff] [blame] | 964 | if (!page) |
Christoph Hellwig | 90ae409 | 2019-08-20 11:45:49 +0900 | [diff] [blame] | 965 | page = alloc_pages_node(node, gfp, get_order(alloc_size)); |
| 966 | if (!page) |
Robin Murphy | 072bebc | 2019-05-20 09:29:37 +0200 | [diff] [blame] | 967 | return NULL; |
| 968 | |
Christoph Hellwig | e6475eb | 2019-05-20 09:29:45 +0200 | [diff] [blame] | 969 | if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { |
Christoph Hellwig | 33dcb37 | 2019-07-26 09:26:40 +0200 | [diff] [blame] | 970 | pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); |
Robin Murphy | 072bebc | 2019-05-20 09:29:37 +0200 | [diff] [blame] | 971 | |
Robin Murphy | 9ad5d6e | 2019-05-20 09:29:41 +0200 | [diff] [blame] | 972 | cpu_addr = dma_common_contiguous_remap(page, alloc_size, |
Christoph Hellwig | 5123174 | 2019-08-30 08:51:01 +0200 | [diff] [blame] | 973 | prot, __builtin_return_address(0)); |
Robin Murphy | 9ad5d6e | 2019-05-20 09:29:41 +0200 | [diff] [blame] | 974 | if (!cpu_addr) |
Christoph Hellwig | ee1ef05 | 2019-05-20 09:29:42 +0200 | [diff] [blame] | 975 | goto out_free_pages; |
Robin Murphy | 8680aa5 | 2019-05-20 09:29:38 +0200 | [diff] [blame] | 976 | |
| 977 | if (!coherent) |
Robin Murphy | 9ad5d6e | 2019-05-20 09:29:41 +0200 | [diff] [blame] | 978 | arch_dma_prep_coherent(page, size); |
Robin Murphy | 8680aa5 | 2019-05-20 09:29:38 +0200 | [diff] [blame] | 979 | } else { |
Robin Murphy | 9ad5d6e | 2019-05-20 09:29:41 +0200 | [diff] [blame] | 980 | cpu_addr = page_address(page); |
Robin Murphy | 8680aa5 | 2019-05-20 09:29:38 +0200 | [diff] [blame] | 981 | } |
Christoph Hellwig | ee1ef05 | 2019-05-20 09:29:42 +0200 | [diff] [blame] | 982 | |
| 983 | *pagep = page; |
Robin Murphy | 9ad5d6e | 2019-05-20 09:29:41 +0200 | [diff] [blame] | 984 | memset(cpu_addr, 0, alloc_size); |
| 985 | return cpu_addr; |
Robin Murphy | 072bebc | 2019-05-20 09:29:37 +0200 | [diff] [blame] | 986 | out_free_pages: |
Nicolin Chen | 591fcf3 | 2019-06-03 15:52:59 -0700 | [diff] [blame] | 987 | dma_free_contiguous(dev, page, alloc_size); |
Robin Murphy | 072bebc | 2019-05-20 09:29:37 +0200 | [diff] [blame] | 988 | return NULL; |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 989 | } |
| 990 | |
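/*
 * Editorial note on the ordering above: the explicit memset() is what
 * guarantees a zeroed buffer on every path (CMA pages handed out by
 * dma_alloc_contiguous() are not cleared by __GFP_ZERO), and it goes
 * through cpu_addr because, in the remapped case, that is the only kernel
 * mapping with suitable attributes, and for highmem pages the only
 * mapping at all.
 */
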
Christoph Hellwig | ee1ef05 | 2019-05-20 09:29:42 +0200 | [diff] [blame] | 991 | static void *iommu_dma_alloc(struct device *dev, size_t size, |
| 992 | dma_addr_t *handle, gfp_t gfp, unsigned long attrs) |
| 993 | { |
| 994 | bool coherent = dev_is_dma_coherent(dev); |
| 995 | int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); |
| 996 | struct page *page = NULL; |
| 997 | void *cpu_addr; |
| 998 | |
| 999 | gfp |= __GFP_ZERO; |
| 1000 | |
Christoph Hellwig | e6475eb | 2019-05-20 09:29:45 +0200 | [diff] [blame] | 1001 | if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) && |
Christoph Hellwig | ee1ef05 | 2019-05-20 09:29:42 +0200 | [diff] [blame] | 1002 | !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) |
| 1003 | return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs); |
| 1004 | |
Christoph Hellwig | e6475eb | 2019-05-20 09:29:45 +0200 | [diff] [blame] | 1005 | if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && |
| 1006 | !gfpflags_allow_blocking(gfp) && !coherent) |
Christoph Hellwig | ee1ef05 | 2019-05-20 09:29:42 +0200 | [diff] [blame] | 1007 | cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp); |
| 1008 | else |
| 1009 | cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs); |
| 1010 | if (!cpu_addr) |
| 1011 | return NULL; |
| 1012 | |
| 1013 | *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot); |
| 1014 | if (*handle == DMA_MAPPING_ERROR) { |
| 1015 | __iommu_dma_free(dev, size, cpu_addr); |
| 1016 | return NULL; |
| 1017 | } |
| 1018 | |
| 1019 | return cpu_addr; |
| 1020 | } |
| 1021 | |
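/*
 * Illustrative sketch (editorial addition): the strategy above is picked by
 * calling context rather than by an explicit flag. A sleepable allocation
 * takes the iommu_dma_alloc_remap() path, an atomic allocation for a
 * non-coherent device is satisfied from the pre-populated atomic pool, and
 * everything else comes from CMA or the page allocator. A driver only ever
 * sees the usual interface:
 *
 *	dma_addr_t handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, SZ_64K, &handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	... device uses "handle", CPU uses "cpu_addr" ...
 *	dma_free_coherent(dev, SZ_64K, cpu_addr, handle);
 */
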
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1022 | static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, |
| 1023 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
| 1024 | unsigned long attrs) |
| 1025 | { |
| 1026 | unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; |
Christoph Hellwig | efd9f10 | 2019-05-20 09:29:44 +0200 | [diff] [blame] | 1027 | unsigned long pfn, off = vma->vm_pgoff; |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1028 | int ret; |
| 1029 | |
Christoph Hellwig | 33dcb37 | 2019-07-26 09:26:40 +0200 | [diff] [blame] | 1030 | vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1031 | |
| 1032 | if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) |
| 1033 | return ret; |
| 1034 | |
| 1035 | if (off >= nr_pages || vma_pages(vma) > nr_pages - off) |
| 1036 | return -ENXIO; |
| 1037 | |
Christoph Hellwig | e6475eb | 2019-05-20 09:29:45 +0200 | [diff] [blame] | 1038 | if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { |
Christoph Hellwig | 5cf4537 | 2019-06-03 09:14:31 +0200 | [diff] [blame^] | 1039 | struct page **pages = dma_common_find_pages(cpu_addr); |
Christoph Hellwig | efd9f10 | 2019-05-20 09:29:44 +0200 | [diff] [blame] | 1040 | |
| 1041 | if (pages) |
| 1042 | return __iommu_dma_mmap(pages, size, vma); |
| 1043 | pfn = vmalloc_to_pfn(cpu_addr); |
| 1044 | } else { |
| 1045 | pfn = page_to_pfn(virt_to_page(cpu_addr)); |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1046 | } |
| 1047 | |
Christoph Hellwig | efd9f10 | 2019-05-20 09:29:44 +0200 | [diff] [blame] | 1048 | return remap_pfn_range(vma, vma->vm_start, pfn + off, |
| 1049 | vma->vm_end - vma->vm_start, |
| 1050 | vma->vm_page_prot); |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1051 | } |
| 1052 | |
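/*
 * Illustrative sketch (editorial addition): userspace reaches this op via
 * dma_mmap_coherent() from a driver's ->mmap file operation. "foo_dev" and
 * its fields are assumptions for the example.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->handle, foo->size);
 *	}
 */
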
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1053 | static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, |
| 1054 | void *cpu_addr, dma_addr_t dma_addr, size_t size, |
| 1055 | unsigned long attrs) |
| 1056 | { |
Christoph Hellwig | 3fb3378 | 2019-05-20 09:29:43 +0200 | [diff] [blame] | 1057 | struct page *page; |
| 1058 | int ret; |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1059 | |
Christoph Hellwig | e6475eb | 2019-05-20 09:29:45 +0200 | [diff] [blame] | 1060 | if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { |
Christoph Hellwig | 5cf4537 | 2019-06-03 09:14:31 +0200 | [diff] [blame^] | 1061 | struct page **pages = dma_common_find_pages(cpu_addr); |
Christoph Hellwig | 3fb3378 | 2019-05-20 09:29:43 +0200 | [diff] [blame] | 1062 | |
| 1063 | if (pages) { |
| 1064 | return sg_alloc_table_from_pages(sgt, pages, |
| 1065 | PAGE_ALIGN(size) >> PAGE_SHIFT, |
| 1066 | 0, size, GFP_KERNEL); |
| 1067 | } |
| 1068 | |
| 1069 | page = vmalloc_to_page(cpu_addr); |
| 1070 | } else { |
| 1071 | page = virt_to_page(cpu_addr); |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1072 | } |
| 1073 | |
Christoph Hellwig | 3fb3378 | 2019-05-20 09:29:43 +0200 | [diff] [blame] | 1074 | ret = sg_alloc_table(sgt, 1, GFP_KERNEL); |
| 1075 | if (!ret) |
| 1076 | sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); |
| 1077 | return ret; |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1078 | } |
| 1079 | |
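/*
 * Illustrative sketch (editorial addition): this op backs dma_get_sgtable(),
 * typically used to describe a coherent buffer to another subsystem such as
 * dma-buf. Note the remapped case above may need several table entries,
 * while the physically contiguous case always fits in one.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = dma_get_sgtable(dev, &sgt, cpu_addr, handle, size);
 *	if (ret)
 *		return ret;
 *	... share sgt.sgl / sgt.orig_nents ...
 *	sg_free_table(&sgt);
 */
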
Yoshihiro Shimoda | 158a6d3 | 2019-08-28 21:35:41 +0900 | [diff] [blame] | 1080 | static unsigned long iommu_dma_get_merge_boundary(struct device *dev) |
| 1081 | { |
| 1082 | struct iommu_domain *domain = iommu_get_dma_domain(dev); |
| 1083 | |
| 1084 | return (1UL << __ffs(domain->pgsize_bitmap)) - 1; |
| 1085 | } |
| 1086 | |
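/*
 * Worked example (values are an assumption for illustration): for a domain
 * with pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G, __ffs() yields 12, so
 * dma_get_merge_boundary() reports (1UL << 12) - 1 = 0xfff. The block
 * layer may then merge segments that abut at 4K granularity, since the
 * IOMMU can stitch such segments into one contiguous DMA range.
 */
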
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1087 | static const struct dma_map_ops iommu_dma_ops = { |
| 1088 | .alloc = iommu_dma_alloc, |
| 1089 | .free = iommu_dma_free, |
| 1090 | .mmap = iommu_dma_mmap, |
| 1091 | .get_sgtable = iommu_dma_get_sgtable, |
| 1092 | .map_page = iommu_dma_map_page, |
| 1093 | .unmap_page = iommu_dma_unmap_page, |
| 1094 | .map_sg = iommu_dma_map_sg, |
| 1095 | .unmap_sg = iommu_dma_unmap_sg, |
| 1096 | .sync_single_for_cpu = iommu_dma_sync_single_for_cpu, |
| 1097 | .sync_single_for_device = iommu_dma_sync_single_for_device, |
| 1098 | .sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu, |
| 1099 | .sync_sg_for_device = iommu_dma_sync_sg_for_device, |
| 1100 | .map_resource = iommu_dma_map_resource, |
| 1101 | .unmap_resource = iommu_dma_unmap_resource, |
Yoshihiro Shimoda | 158a6d3 | 2019-08-28 21:35:41 +0900 | [diff] [blame] | 1102 | .get_merge_boundary = iommu_dma_get_merge_boundary, |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1103 | }; |
| 1104 | |
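/*
 * Editorial note: once iommu_setup_dma_ops() below installs this table in
 * dev->dma_ops, every generic DMA API call on the device dispatches here
 * without the driver knowing an IOMMU is present, e.g.:
 *
 *	dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *		-> dma_map_page_attrs()
 *		-> get_dma_ops(dev)->map_page == iommu_dma_map_page()
 */
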
| 1105 | /* |
| 1106 | * The IOMMU core code allocates the default DMA domain, which the underlying |
| 1107 | * IOMMU driver needs to support via the dma-iommu layer. |
| 1108 | */ |
| 1109 | void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size) |
| 1110 | { |
| 1111 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
| 1112 | |
| 1113 | if (!domain) |
| 1114 | goto out_err; |
| 1115 | |
| 1120 | if (domain->type == IOMMU_DOMAIN_DMA) { |
| 1121 | if (iommu_dma_init_domain(domain, dma_base, size, dev)) |
| 1122 | goto out_err; |
| 1123 | dev->dma_ops = &iommu_dma_ops; |
| 1124 | } |
| 1125 | |
| 1126 | return; |
| 1127 | out_err: |
| 1128 | pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", |
| 1129 | dev_name(dev)); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1130 | } |
| 1131 | |
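/*
 * Illustrative sketch (editorial addition): arch code is the expected
 * caller. On arm64, for instance, arch_setup_dma_ops() does roughly the
 * following once the device has been attached to its default domain:
 *
 *	if (iommu)
 *		iommu_setup_dma_ops(dev, dma_base, size);
 */
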
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1132 | static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, |
| 1133 | phys_addr_t msi_addr, struct iommu_domain *domain) |
| 1134 | { |
| 1135 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
| 1136 | struct iommu_dma_msi_page *msi_page; |
Robin Murphy | 842fe51 | 2017-03-31 15:46:05 +0100 | [diff] [blame] | 1137 | dma_addr_t iova; |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1138 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; |
Robin Murphy | fdbe574 | 2017-01-19 20:57:46 +0000 | [diff] [blame] | 1139 | size_t size = cookie_msi_granule(cookie); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1140 | |
Robin Murphy | fdbe574 | 2017-01-19 20:57:46 +0000 | [diff] [blame] | 1141 | msi_addr &= ~(phys_addr_t)(size - 1); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1142 | list_for_each_entry(msi_page, &cookie->msi_page_list, list) |
| 1143 | if (msi_page->phys == msi_addr) |
| 1144 | return msi_page; |
| 1145 | |
| 1146 | msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC); |
| 1147 | if (!msi_page) |
| 1148 | return NULL; |
| 1149 | |
Robin Murphy | 8af23fa | 2019-07-29 16:32:38 +0100 | [diff] [blame] | 1150 | iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev); |
| 1151 | if (!iova) |
Robin Murphy | a44e665 | 2017-03-31 15:46:06 +0100 | [diff] [blame] | 1152 | goto out_free_page; |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1153 | |
Robin Murphy | 8af23fa | 2019-07-29 16:32:38 +0100 | [diff] [blame] | 1154 | if (iommu_map(domain, iova, msi_addr, size, prot)) |
| 1155 | goto out_free_iova; |
| 1156 | |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1157 | INIT_LIST_HEAD(&msi_page->list); |
Robin Murphy | a44e665 | 2017-03-31 15:46:06 +0100 | [diff] [blame] | 1158 | msi_page->phys = msi_addr; |
| 1159 | msi_page->iova = iova; |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1160 | list_add(&msi_page->list, &cookie->msi_page_list); |
| 1161 | return msi_page; |
| 1162 | |
Robin Murphy | 8af23fa | 2019-07-29 16:32:38 +0100 | [diff] [blame] | 1163 | out_free_iova: |
| 1164 | iommu_dma_free_iova(cookie, iova, size); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1165 | out_free_page: |
| 1166 | kfree(msi_page); |
| 1167 | return NULL; |
| 1168 | } |
| 1169 | |
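/*
 * Worked example (values are assumptions): with a 4K IOVA granule, a
 * doorbell at msi_addr 0x80100040 rounds down to the page at 0x80100000;
 * one granule of IOVA space is allocated and mapped to it, and the cached
 * msi_page is then reused for every MSI targeting that doorbell page, so
 * the lookup loop above hits on all but the first call.
 */
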
Julien Grall | ece6e6f | 2019-05-01 14:58:19 +0100 | [diff] [blame] | 1170 | int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1171 | { |
Julien Grall | ece6e6f | 2019-05-01 14:58:19 +0100 | [diff] [blame] | 1172 | struct device *dev = msi_desc_to_dev(desc); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1173 | struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
| 1174 | struct iommu_dma_cookie *cookie; |
| 1175 | struct iommu_dma_msi_page *msi_page; |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1176 | unsigned long flags; |
| 1177 | |
Julien Grall | ece6e6f | 2019-05-01 14:58:19 +0100 | [diff] [blame] | 1178 | if (!domain || !domain->iova_cookie) { |
| 1179 | desc->iommu_cookie = NULL; |
| 1180 | return 0; |
| 1181 | } |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1182 | |
| 1183 | cookie = domain->iova_cookie; |
| 1184 | |
| 1185 | /* |
| 1186 | * We disable IRQs to rule out a possible inversion against |
| 1187 | * irq_desc_lock if, say, someone tries to retarget the affinity |
| 1188 | * of an MSI from within an IPI handler. |
| 1189 | */ |
| 1190 | spin_lock_irqsave(&cookie->msi_lock, flags); |
| 1191 | msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); |
| 1192 | spin_unlock_irqrestore(&cookie->msi_lock, flags); |
| 1193 | |
Julien Grall | ece6e6f | 2019-05-01 14:58:19 +0100 | [diff] [blame] | 1194 | msi_desc_set_iommu_cookie(desc, msi_page); |
| 1195 | |
| 1196 | if (!msi_page) |
| 1197 | return -ENOMEM; |
| 1198 | return 0; |
| 1199 | } |
| 1200 | |
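/*
 * Illustrative sketch (editorial addition, loosely modelled on the GICv3
 * ITS): an MSI irqchip calls this while setting up the interrupt, before
 * any message is composed, passing the physical doorbell address:
 *
 *	err = iommu_dma_prepare_msi(info->desc,
 *				    its->phys_base + GITS_TRANSLATER);
 *	if (err)
 *		return err;
 */
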
| 1201 | void iommu_dma_compose_msi_msg(struct msi_desc *desc, |
| 1202 | struct msi_msg *msg) |
| 1203 | { |
| 1204 | struct device *dev = msi_desc_to_dev(desc); |
| 1205 | const struct iommu_domain *domain = iommu_get_domain_for_dev(dev); |
| 1206 | const struct iommu_dma_msi_page *msi_page; |
| 1207 | |
| 1208 | msi_page = msi_desc_get_iommu_cookie(desc); |
| 1209 | |
| 1210 | if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) |
| 1211 | return; |
| 1212 | |
| 1213 | msg->address_hi = upper_32_bits(msi_page->iova); |
| 1214 | msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; |
| 1215 | msg->address_lo += lower_32_bits(msi_page->iova); |
Robin Murphy | 44bb7e2 | 2016-09-12 17:13:59 +0100 | [diff] [blame] | 1216 | } |
Christoph Hellwig | 06d6072 | 2019-05-20 09:29:29 +0200 | [diff] [blame] | 1217 | |
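/*
 * Worked example continuing the one above (values are assumptions): if the
 * doorbell page is mapped at IOVA 0xff000000, a message composed with
 * address_lo = 0x80100040 keeps only the intra-granule offset (0x040) and
 * gains the IOVA, ending up as address_hi:address_lo = 0x0:0xff000040.
 * An irqchip hooks this at the end of its ->irq_compose_msi_msg() path,
 * roughly ("foo" names are assumptions):
 *
 *	static void foo_irq_compose_msi_msg(struct irq_data *d,
 *					    struct msi_msg *msg)
 *	{
 *		... fill msg with the physical doorbell address ...
 *		iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
 *	}
 */
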
| 1218 | static int iommu_dma_init(void) |
| 1219 | { |
| 1220 | return iova_cache_get(); |
| 1221 | } |
| 1222 | arch_initcall(iommu_dma_init); |