// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
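
/*
 * Example (illustrative sketch only; "my_domain" and "my_domain_alloc" are
 * hypothetical driver names, not kernel APIs): a driver's domain_alloc
 * callback pairing with iommu_get_dma_cookie() for DMA domains.
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *md;
 *
 *		md = kzalloc(sizeof(*md), GFP_KERNEL);
 *		if (!md)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&md->domain)) {
 *			kfree(md);
 *			return NULL;
 *		}
 *		return &md->domain;
 *	}
 */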

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
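
/*
 * Example (illustrative sketch only; the base address and error label are
 * made up): a caller owning an unmanaged domain reserves an IOVA window for
 * MSI doorbells before attaching devices. Mappings are then handed out
 * linearly from that base, one PAGE_SIZE granule per distinct doorbell, as
 * iommu_dma_get_msi_page() encounters new doorbell addresses.
 *
 *	ret = iommu_get_msi_cookie(domain, 0x8000000);
 *	if (ret)
 *		goto out_free_domain;
 */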

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
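
/*
 * Example (illustrative sketch only; "my_domain_free" and "to_my_domain"
 * are hypothetical names): the matching teardown in a driver's domain_free
 * callback, releasing the cookie before freeing the domain itself.
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_my_domain(domain));
 *	}
 */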

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
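
/*
 * Example (illustrative sketch only; "my_iommu_ops" is a hypothetical
 * name): a driver with no extra regions of its own can plug this helper
 * straight into its ops table; drivers with their own regions typically
 * call it from a wrapper instead.
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		...
 *		.get_resv_regions	= iommu_dma_get_resv_regions,
 *		...
 *	};
 */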

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
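
/*
 * Worked example (illustrative only): with pgsize_bitmap = SZ_4K | SZ_2M |
 * SZ_1G, __ffs() picks order 12, so the IOVA granule is 4KB. A device
 * initialised with base == 0 then has base_pfn clamped to 1, which keeps
 * IOVA page 0 out of the allocator so it remains an invalid address.
 */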

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
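
/*
 * Worked example (illustrative only): for a cache-coherent device mapping
 * a buffer for device-to-memory traffic,
 * dma_info_to_prot(DMA_FROM_DEVICE, true, 0) returns
 * IOMMU_CACHE | IOMMU_WRITE: the device may write through the mapping but
 * not read it, and the mapping participates in cache coherency.
 */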

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}
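
/*
 * Worked example (illustrative only): with a 4KB granule, a 24KB request
 * is 6 granules; since 6 is below the IOVA range cache limit it is rounded
 * up to 8 granules (32KB of IOVA space), so the range freed later matches
 * a power-of-two cache bucket exactly.
 */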

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

static struct page **__iommu_dma_get_pages(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || !area->pages)
		return NULL;
	return area->pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc_remap()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(dev, phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
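
/*
 * Worked example (illustrative only): two 4KB buffers that were padded and
 * mapped back-to-back in one IOVA range come out of __finalise_sg() as a
 * single 8KB DMA segment, provided the second one starts on an IOVA page
 * boundary, the pair does not straddle the device's segment boundary mask,
 * and 8KB fits within its maximum segment size.
 */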

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
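
/*
 * Example (illustrative sketch only; "sgt" and "mapped" are made-up
 * names): drivers never call iommu_dma_map_sg() directly; they go through
 * the DMA API, which dispatches here via iommu_dma_ops once installed.
 *
 *	mapped = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *
 * Note that mapped may be smaller than nents if __finalise_sg() merged
 * adjacent entries into fewer, larger DMA segments.
 */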

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = __iommu_dma_get_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				VM_USERMAP, prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = __iommu_dma_get_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = __iommu_dma_get_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
};
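
/*
 * Example (illustrative only): once iommu_setup_dma_ops() below installs
 * this table, ordinary DMA API calls on the device are routed through it,
 * e.g.:
 *
 *	buf = dma_alloc_coherent(dev, SZ_64K, &dma_handle, GFP_KERNEL);
 *		// dispatches to iommu_dma_alloc(), which maps the buffer
 *		// at an IOVA via __iommu_dma_map()
 *	dma_free_coherent(dev, SZ_64K, buf, dma_handle);
 *		// dispatches to iommu_dma_free()
 */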

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		 dev_name(dev));
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot);
	if (iova == DMA_MAPPING_ERROR)
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	unsigned long flags;

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
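
/*
 * Example (illustrative sketch only; "doorbell_phys" and "msg" are made-up
 * names): an MSI irqchip uses the pair above around message composition,
 * roughly:
 *
 *	ret = iommu_dma_prepare_msi(desc, doorbell_phys);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_dma_compose_msi_msg(desc, &msg);
 *
 * iommu_dma_prepare_msi() maps the doorbell page up front under the
 * cookie's MSI lock, so iommu_dma_compose_msi_msg() can later rewrite the
 * message address to the remapped IOVA without needing to allocate.
 */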

static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);