// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

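/*
 * Pre-populate linear phys == iova msi_page entries covering a hardware
 * MSI region, one per IOVA granule, so that iommu_dma_get_msi_page() can
 * later find them without allocating any new IOVA space.
 */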
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

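/*
 * Carve PCI host bridge apertures out of the IOVA space: the bridge's MEM
 * windows must never be used as device addresses, and since the (sorted)
 * dma_ranges entries describe the only ranges DMA can reach, everything
 * between and beyond them gets reserved too.
 */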
static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

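/*
 * Punch all reserved regions reported for @dev out of the IOVA space so
 * the allocator can never hand them out as DMA addresses; hardware MSI
 * regions additionally get pre-mapped msi_page entries.
 */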
static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

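/* Flush queue timeout callback: invalidate the whole IOTLB for the domain */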
static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

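/*
 * In a kdump kernel, devices may still be live with the crashed kernel's
 * mappings; drivers opting in via ops->is_attach_deferred only get
 * attached to their new domain here, on first DMA API use.
 */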
static int iommu_dma_deferred_attach(struct device *dev,
		struct iommu_domain *domain)
{
	const struct iommu_ops *ops = domain->ops;

	if (!is_kdump_kernel())
		return 0;

	if (unlikely(ops->is_attach_deferred &&
			ops->is_attach_deferred(domain, dev)))
		return iommu_attach_device(domain, dev);

	return 0;
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

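/*
 * Allocate IOVA space for @size bytes below @dma_limit. An MSI cookie is
 * just a linear bump allocator; a full IOVA cookie rounds cacheable sizes
 * up to a power of two and, for PCI devices, first tries below 4GB to
 * favour 32-bit (SAC) addresses.
 */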
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

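/*
 * Return IOVA space: rewind the trivial MSI allocator, defer the free via
 * the flush queue in non-strict mode, or release it immediately.
 */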
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

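/*
 * Tear down a mapping: unmap the granule-aligned range, sync the IOTLB
 * (immediately in strict mode, batched by the flush queue otherwise) and
 * give back the IOVA.
 */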
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_tlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

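/*
 * Core mapping helper: align @phys and @size to IOVA granules, allocate
 * an IOVA below @dma_mask and install the IOMMU mapping. Returns the DMA
 * address corresponding to @phys, or DMA_MAPPING_ERROR on failure.
 */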
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

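/*
 * dma_map_ops entry points for streaming single-page mappings: map through
 * __iommu_dma_map() and perform the CPU cache maintenance a non-coherent
 * device requires, unless DMA_ATTR_SKIP_CPU_SYNC says otherwise.
 */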
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return 0;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

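/*
 * Resource mappings target raw physical (e.g. MMIO) addresses rather than
 * struct pages, so they need no CPU cache maintenance; IOMMU_MMIO requests
 * a device-type mapping where the IOMMU supports one.
 */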
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

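/*
 * dma_alloc_attrs() backend: choose an allocation strategy (scattered
 * remap, atomic pool or contiguous pages) from the gfp flags, coherency
 * and attrs, then install the IOMMU mapping for the resulting buffer.
 */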
static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
					       gfp);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

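/*
 * Scatterlist segments whose DMA addresses abut on an IOMMU granule
 * boundary may be merged, so advertise (granule - 1) as the mask.
 */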
static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

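/*
 * Look up, or create, the msi_page covering the granule that contains
 * @msi_addr. Hardware MSI regions were pre-populated by
 * cookie_init_hw_msi_region(); anything else gets a fresh IOVA allocated
 * and mapped here.
 */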
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_page:
	kfree(msi_page);
	return NULL;
}

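/**
 * iommu_dma_prepare_msi - Map the MSI page in the IOMMU domain of @desc's device
 * @desc: MSI descriptor, will store the MSI page
 * @msi_addr: MSI target address to be mapped
 *
 * Return: 0 on success (including when no IOMMU translation applies),
 * or -ENOMEM if the page could not be mapped.
 */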
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}

static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);