/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#define IOMMU_MAPPING_ERROR	0

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;
};

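/*
 * The granule at which MSI pages are tracked: the IOVA granule for a full
 * DMA cookie, or PAGE_SIZE for the trivial MSI-only cookie.
 */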
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

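/*
 * Allocate and minimally initialise a cookie; the IOVA domain or MSI base
 * is filled in later by iommu_dma_init_domain() or iommu_get_msi_cookie().
 */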
static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers host
 * bridge windows for PCI devices and GICv3 ITS region reservation on ACPI
 * based ARM platforms that may require HW MSI reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	struct pci_host_bridge *bridge;
	struct resource_entry *window;

	if (!is_of_node(dev->iommu_fwspec->iommu_fwnode) &&
	    iort_iommu_msi_get_resv_regions(dev, list) < 0)
		return;

	if (!dev_is_pci(dev))
		return;

	bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
	resource_list_for_each_entry(window, &bridge->windows) {
		struct iommu_resv_region *region;
		phys_addr_t start;
		size_t length;

		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		start = window->res->start - window->offset;
		length = window->res->end - window->res->start + 1;
		region = iommu_alloc_resv_region(start, length, 0,
				IOMMU_RESV_RESERVED);
		if (!region)
			return;

		list_add_tail(&region->list, list);
	}
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

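/*
 * Pre-populate the MSI page list for a hardware MSI region: each IOVA granule
 * in [start, end) is recorded with iova == phys, so later doorbell lookups
 * find an existing entry and skip remapping.
 */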
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

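/*
 * Carve the IOMMU driver's reserved regions out of the IOVA space so they are
 * never handed out as DMA addresses. Software MSI regions are skipped (we
 * manage those ourselves); hardware MSI regions additionally get their MSI
 * pages pre-populated.
 */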
static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn, end_pfn;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

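/*
 * Allocate IOVA space for @size bytes, constrained by the device's DMA mask
 * and the domain aperture. For MSI-only cookies this degenerates to a trivial
 * linear allocator.
 */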
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

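/*
 * Counterpart to iommu_dma_alloc_iova(): return @size bytes of IOVA space at
 * @iova to the allocator (or, for an MSI cookie, unwind the linear allocator).
 */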
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

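/*
 * Tear down a mapping: expand the range to whole IOVA granules, unmap it from
 * the domain, then release the IOVA space.
 */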
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

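/* Free a page array built by __iommu_dma_alloc_pages(), then the array itself */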
static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

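/*
 * Allocate @count pages for a non-contiguous buffer, trying the largest
 * currently-satisfiable order permitted by @order_mask first and splitting
 * higher-order allocations, so the result is always an array of order-0 pages.
 */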
static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = IOMMU_MAPPING_ERROR;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = IOMMU_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}

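/*
 * Common helper for mapping one physically contiguous region: allocate IOVA
 * space covering the granule-aligned extent of @phys, create the IOMMU
 * mapping, and return the DMA address of @phys itself, or IOMMU_MAPPING_ERROR
 * on failure.
 */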
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return IOMMU_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return IOMMU_MAPPING_ERROR;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

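/*
 * DMA-API resource mapping: map a raw physical address range (typically
 * device MMIO) with IOMMU_MMIO set, with no struct page involved.
 */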
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == IOMMU_MAPPING_ERROR;
}

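/*
 * Look up, or lazily create, the MSI page covering @msi_addr. New doorbell
 * pages are mapped write-only (IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO) and
 * cached on the cookie's list so each address is only ever mapped once.
 */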
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot);
	if (iommu_dma_mapping_error(dev, iova))
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

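/*
 * Rewrite an MSI message so that its doorbell address is the IOVA at which
 * the doorbell is mapped in the device's IOMMU domain, rather than the raw
 * physical address composed by the irqchip driver.
 */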
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= cookie_msi_granule(cookie) - 1;
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}