/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

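/*
 * The granule at which MSI doorbell pages are mapped: the IOVA granule
 * for a full DMA cookie, or PAGE_SIZE for an MSI-only cookie.
 */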
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
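
/*
 * An illustrative sketch (not part of this file) of how an IOMMU driver's
 * domain_alloc callback might use the cookie helper above; "my_domain"
 * and its fields are hypothetical:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct my_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *
 *		if (!dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 */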

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
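
/*
 * A minimal usage sketch: a VFIO-style caller that owns an unmanaged
 * domain could carve out a software MSI region at some base address
 * (MSI_IOVA_BASE here is a hypothetical name) before attaching devices:
 *
 *	if (iommu_get_msi_cookie(domain, MSI_IOVA_BASE))
 *		goto out_free_domain;
 */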

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI-based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

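/*
 * For a hardware-reserved MSI region (IOMMU_RESV_MSI), record identity
 * (IOVA == physical) entries in the cookie's MSI page list, so that
 * iommu_dma_get_msi_page() hands doorbell addresses within it back
 * unchanged rather than remapping them.
 */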
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

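/*
 * Reserve the IOVA ranges covered by the host bridge's outbound memory
 * windows, so that device-visible addresses which would be claimed by
 * the bridge are never handed out as DMA addresses.
 */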
static void iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}
}

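/*
 * Carve every reserved region reported for @dev out of the IOVA space,
 * and set up MSI page entries for any hardware-translated MSI regions.
 * Software MSI regions are skipped, since this layer is itself the
 * software that manages them.
 */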
static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev))
		iova_reserve_pci_windows(to_pci_dev(dev), iovad);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

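/*
 * Flush queue callback: invalidate the whole IOTLB when a batch of
 * deferred IOVA frees is about to be released back to the allocator.
 */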
static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn, end_pfn;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);
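
/*
 * A hedged sketch of how arch code might initialise a device's DMA
 * domain; "dma_base" and "size" stand for whatever IOVA window the arch
 * chooses (illustrative only, not the actual arch implementation):
 *
 *	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *	if (iommu_dma_init_domain(domain, dma_base, size, dev))
 *		pr_warn("Failed to initialise IOVA domain\n");
 */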

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

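/*
 * Allocate a run of IOVA space for the given size and DMA limit. For an
 * MSI-only cookie this is a trivial linear bump allocation; for a full
 * cookie it draws from the IOVA caches, preferring a 32-bit (SAC)
 * address for PCI devices before falling back to the whole limit.
 */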
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

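/*
 * Return an IOVA range to whichever allocator produced it: rewind the
 * MSI cookie's linear allocator, defer the free via the flush queue in
 * non-strict mode, or release it straight back to the IOVA caches.
 */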
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

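/*
 * Unmap a whole granule-aligned region and release its IOVA space. The
 * IOTLB is synced immediately unless a flush queue is in use, in which
 * case invalidation is deferred until the IOVA is actually recycled.
 */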
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

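/* Free each page of the buffer, then the (possibly vmalloc'd) page array */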
static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

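/*
 * Populate a page array for @count pages, opportunistically using the
 * largest orders permitted by @order_mask and splitting any compound
 * pages, so that the array always ends up holding order-0 pages.
 */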
static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_MAPPING_ERROR;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}

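/*
 * Common helper for all single-region mappings: allocate a large-enough
 * IOVA range (granule-aligned for full cookies) and map @phys into it,
 * returning the DMA address that corresponds to @phys.
 */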
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
			iommu_get_dma_domain(dev));
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

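/*
 * The mapped segments all came from one contiguous IOVA allocation, so
 * unmapping needs only walk to the last in-use segment to find the
 * overall [start, end) range to tear down.
 */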
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			iommu_get_dma_domain(dev));
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}

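/*
 * Look up (or create) the MSI page mapping the doorbell at @msi_addr,
 * rounded down to the MSI granule. Called under the cookie's msi_lock,
 * hence the GFP_ATOMIC allocation.
 */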
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
	if (iova == DMA_MAPPING_ERROR)
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

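/*
 * Rewrite an MSI message in place so that its doorbell address becomes
 * the IOVA at which the doorbell is mapped in the device's IOMMU
 * domain; a no-op for domains without a DMA cookie.
 */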
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= cookie_msi_granule(cookie) - 1;
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}