/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa, and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

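/*
 * For non-x86 builds, provide a local dma_alloc_coherent_mask(): use the
 * device's coherent DMA mask, falling back to a 24-bit (GFP_DMA) or 32-bit
 * mask when none is set.
 */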
#ifndef CONFIG_X86
static unsigned long dma_alloc_coherent_mask(struct device *dev,
                                             gfp_t gfp)
{
        unsigned long dma_mask = 0;

        dma_mask = dev->coherent_dma_mask;
        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

        return dma_mask;
}
#endif

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
        return phys_to_machine(XPADDR(paddr)).maddr;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
        return machine_to_phys(XMADDR(baddr)).paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
        return xen_phys_to_bus(virt_to_phys(address));
}

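/*
 * Check whether the pages backing [offset, offset + length) starting at
 * @pfn map to consecutive machine frames, i.e. whether the buffer is
 * physically contiguous from the device's point of view.
 */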
static int check_pages_physically_contiguous(unsigned long pfn,
                                             unsigned int offset,
                                             size_t length)
{
        unsigned long next_mfn;
        int i;
        int nr_pages;

        next_mfn = pfn_to_mfn(pfn);
        nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;

        for (i = 1; i < nr_pages; i++) {
                if (pfn_to_mfn(++pfn) != ++next_mfn)
                        return 0;
        }
        return 1;
}

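/*
 * Returns 1 if a buffer of @size bytes at physical address @p crosses a
 * page boundary and the underlying machine frames are not contiguous,
 * in which case the buffer cannot be handed to a device as-is.
 */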
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
        unsigned long pfn = PFN_DOWN(p);
        unsigned int offset = p & ~PAGE_MASK;

        if (offset + size <= PAGE_SIZE)
                return 0;
        if (check_pages_physically_contiguous(pfn, offset, size))
                return 0;
        return 1;
}

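/*
 * Check whether a bus (machine) address falls inside our bounce buffer;
 * used by the unmap and sync paths to decide whether to bounce.
 */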
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
        unsigned long mfn = PFN_DOWN(dma_addr);
        unsigned long pfn = mfn_to_local_pfn(mfn);
        phys_addr_t paddr;

        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
         * in our domain. Therefore _only_ check address within our domain.
         */
        if (pfn_valid(pfn)) {
                paddr = PFN_PHYS(pfn);
                return paddr >= virt_to_phys(xen_io_tlb_start) &&
                       paddr < virt_to_phys(xen_io_tlb_end);
        }
        return 0;
}

static int max_dma_bits = 32;

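/*
 * Exchange the pseudo-physical pages backing the buffer, in IO_TLB_SEGSIZE
 * chunks, for machine pages that are contiguous and addressable within
 * dma_bits, widening the mask up to max_dma_bits if the exchange fails.
 */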
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
        int i, rc;
        int dma_bits;
        dma_addr_t dma_handle;
        phys_addr_t p = virt_to_phys(buf);

        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

        i = 0;
        do {
                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

                do {
                        rc = xen_create_contiguous_region(
                                p + (i << IO_TLB_SHIFT),
                                get_order(slabs << IO_TLB_SHIFT),
                                dma_bits, &dma_handle);
                } while (rc && dma_bits++ < max_dma_bits);
                if (rc)
                        return rc;

                i += slabs;
        } while (i < nslabs);
        return 0;
}
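
/*
 * Pick the IO TLB size: honour a size set via swiotlb_nr_tbl(), otherwise
 * default to 64MB worth of slabs, and return the size in bytes.
 */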
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
        if (!nr_tbl) {
                xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
                xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
        } else
                xen_io_tlb_nslabs = nr_tbl;

        return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
        XEN_SWIOTLB_UNKNOWN = 0,
        XEN_SWIOTLB_ENOMEM,
        XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
        switch (err) {
        case XEN_SWIOTLB_ENOMEM:
                return "Cannot allocate Xen-SWIOTLB buffer\n";
        case XEN_SWIOTLB_EFIXUP:
                return "Failed to get contiguous memory for DMA from Xen!\n"
                       "You either: don't have the permissions, do not have"
                       " enough free memory under 4GB, or the hypervisor memory"
                       " is too fragmented!";
        default:
                break;
        }
        return "";
}
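
/*
 * Allocate the IO TLB (from bootmem early in boot, from the page allocator
 * later), swap it for machine-contiguous memory below 4GB via
 * xen_swiotlb_fixup(), and hand it to the generic swiotlb code. On failure
 * the buffer size is halved (down to 2MB) and the whole sequence retried.
 */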
int __ref xen_swiotlb_init(int verbose, bool early)
{
        unsigned long bytes, order;
        int rc = -ENOMEM;
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned int repeat = 3;

        xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
        bytes = xen_set_nslabs(xen_io_tlb_nslabs);
        order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
        /*
         * Get IO TLB memory from any location.
         */
        if (early)
                xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
        else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
                while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                        xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
                        if (xen_io_tlb_start)
                                break;
                        order--;
                }
                if (order != get_order(bytes)) {
                        pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
                                (PAGE_SIZE << order) >> 20);
                        xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
                        bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
                }
        }
        if (!xen_io_tlb_start) {
                m_ret = XEN_SWIOTLB_ENOMEM;
                goto error;
        }
        xen_io_tlb_end = xen_io_tlb_start + bytes;
        /*
         * And replace that memory with pages under 4GB.
         */
        rc = xen_swiotlb_fixup(xen_io_tlb_start,
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
                if (early)
                        free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
                else {
                        free_pages((unsigned long)xen_io_tlb_start, order);
                        xen_io_tlb_start = NULL;
                }
                m_ret = XEN_SWIOTLB_EFIXUP;
                goto error;
        }
        start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
        if (early) {
                if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
                                          verbose))
                        panic("Cannot allocate SWIOTLB buffer");
                rc = 0;
        } else
                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
        return rc;
error:
        if (repeat--) {
                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
                                        (xen_io_tlb_nslabs >> 1));
                pr_info("Lowering to %luMB\n",
                        (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
                goto retry;
        }
        pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
        if (early)
                panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        else
                free_pages((unsigned long)xen_io_tlb_start, order);
        return rc;
}
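
/*
 * Allocate coherent memory for a device: get pages from the kernel and, if
 * they are not already machine-contiguous and within the device's coherent
 * DMA mask, exchange them with Xen for a suitable contiguous region.
 */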
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           struct dma_attrs *attrs)
{
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);
        phys_addr_t phys;
        dma_addr_t dev_addr;

        /*
         * Ignore region specifiers - the kernel's idea of the
         * pseudo-phys memory layout has nothing to do with the
         * machine physical layout.  We can't allocate highmem
         * because we can't return a pointer to it.
         */
        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
                return ret;

        /* On ARM this function returns an ioremapped virtual address for
         * which virt_to_phys doesn't return the corresponding physical
         * address. In fact on ARM virt_to_phys only works for kernel direct
         * mapped RAM memory. Also see comment below.
         */
        ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

        if (!ret)
                return ret;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = dma_alloc_coherent_mask(hwdev, flags);

        /* At this point dma_handle is the physical address, next we are
         * going to set it to the machine address.
         * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
         * to *dma_handle. */
        phys = *dma_handle;
        dev_addr = xen_phys_to_bus(phys);
        if (((dev_addr + size - 1 <= dma_mask)) &&
            !range_straddles_page_boundary(phys, size))
                *dma_handle = dev_addr;
        else {
                if (xen_create_contiguous_region(phys, order,
                                                 fls64(dma_mask), dma_handle) != 0) {
                        xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
                        return NULL;
                }
        }
        memset(ret, 0, size);
        return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

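/*
 * Free memory obtained from xen_swiotlb_alloc_coherent, undoing the
 * contiguous-region exchange if one was performed at allocation time.
 */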
void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                          dma_addr_t dev_addr, struct dma_attrs *attrs)
{
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);

        if (dma_release_from_coherent(hwdev, order, vaddr))
                return;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        /* do not use virt_to_phys because on ARM it doesn't return the
         * physical address */
        phys = xen_bus_to_phys(dev_addr);

        if (((dev_addr + size - 1 > dma_mask)) ||
            range_straddles_page_boundary(phys, size))
                xen_destroy_contiguous_region(phys, order);

        xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);


/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * bus (DMA) address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_sync_single_for_cpu is
 * performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                struct dma_attrs *attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_bus(phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) &&
            !range_straddles_page_boundary(phys, size) && !swiotlb_force) {
                /* we are not interested in the dma_addr returned by
                 * xen_dma_map_page, only in the potential cache flushes executed
                 * by the function. */
                xen_dma_map_page(dev, page, offset, size, dir, attrs);
                return dev_addr;
        }

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
        if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;

        xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
                         map & ~PAGE_MASK, size, dir, attrs);
        dev_addr = xen_phys_to_bus(map);

        /*
         * Ensure that the address returned is DMA'ble
         */
        if (!dma_capable(dev, dev_addr, size)) {
                swiotlb_tbl_unmap_single(dev, map, size, dir);
                dev_addr = 0;
        }
        return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
                             struct dma_attrs *attrs)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        xen_dma_unmap_page(hwdev, paddr, size, dir, attrs);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        /*
         * phys_to_virt doesn't work with a highmem page, but we could
         * call dma_mark_clean() with a highmem page here. However, we
         * are fine since dma_mark_clean() is null on POWERPC. We can
         * make dma_mark_clean() take a physical address if necessary.
         */
        dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_sync_single_for_device, and then the device again owns the
 * buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        enum dma_sync_target target)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (target == SYNC_FOR_CPU)
                xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr))
                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

        if (target == SYNC_FOR_DEVICE)
                xen_dma_sync_single_for_device(hwdev, paddr, size, dir);

        if (dir != DMA_FROM_DEVICE)
                return;

        dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                   size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                         int nelems, enum dma_data_direction dir,
                         struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = xen_phys_to_bus(paddr);

                if (swiotlb_force ||
                    !dma_capable(hwdev, dev_addr, sg->length) ||
                    range_straddles_page_boundary(paddr, sg->length)) {
                        phys_addr_t map = swiotlb_tbl_map_single(hwdev,
                                                                 start_dma_addr,
                                                                 sg_phys(sg),
                                                                 sg->length,
                                                                 dir);
                        if (map == SWIOTLB_MAP_ERROR) {
                                dev_warn(hwdev, "swiotlb buffer is full\n");
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                           attrs);
                                sg_dma_len(sgl) = 0;
                                return 0;
                        }
                        xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
                                         map & ~PAGE_MASK,
                                         sg->length,
                                         dir,
                                         attrs);
                        sg->dma_address = xen_phys_to_bus(map);
                } else {
                        /* we are not interested in the dma_addr returned by
                         * xen_dma_map_page, only in the potential cache flushes executed
                         * by the function. */
                        xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
                                         paddr & ~PAGE_MASK,
                                         sg->length,
                                         dir,
                                         attrs);
                        sg->dma_address = dev_addr;
                }
                sg_dma_len(sg) = sg->length;
        }
        return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                           int nelems, enum dma_data_direction dir,
                           struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                    int nelems, enum dma_data_direction dir,
                    enum dma_sync_target target)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                xen_swiotlb_sync_single(hwdev, sg->dma_address,
                                        sg_dma_len(sg), dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                            int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

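/*
 * A returned DMA address of zero is treated as a mapping failure (see the
 * !dma_capable error path in xen_swiotlb_map_page).
 */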
int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);

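/*
 * Update the device's DMA mask, but only if the software IO TLB can be
 * addressed under the new mask.
 */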
int
xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);