// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32bit when dma_addr_t is 64bit leading to a loss in
 * information if the shift is done before casting to 64bit.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}

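/*
 * Illustrative sketch only (not compiled): for a page owned by this domain
 * the two translations above are inverses of each other, so
 *
 *	dma_addr_t dev_addr = xen_phys_to_bus(paddr);
 *	WARN_ON(xen_bus_to_phys(dev_addr) != paddr);
 *
 * holds for any paddr backed by this domain's memory.  What is *not*
 * guaranteed is that consecutive guest frames map to consecutive machine
 * frames, which is what the helper below checks for a candidate DMA buffer.
 */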
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}

static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

static int max_dma_bits = 32;

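/*
 * Exchange the pages backing the buffer, IO_TLB_SEGSIZE slabs at a time, for
 * machine pages that are physically contiguous and addressable within
 * dma_bits.  If the hypervisor cannot satisfy a request, the address width is
 * widened one bit at a time up to max_dma_bits before giving up.
 */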
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}

static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}
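
/*
 * Allocate the software IO TLB, either from the early memblock allocator or
 * from the page allocator, swap it for memory below 4GB with
 * xen_swiotlb_fixup(), and register it with the core swiotlb code.  On
 * failure the buffer size is halved (down to a 2MB minimum) and the whole
 * sequence is retried a few times before giving up.
 */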
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);

	/*
	 * IO TLB memory already allocated. Just use it.
	 */
	if (io_tlb_start != 0) {
		xen_io_tlb_start = phys_to_virt(io_tlb_start);
		goto end;
	}

	/*
	 * Get IO TLB memory from any location.
	 */
	if (early) {
		xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
						  PAGE_SIZE);
		if (!xen_io_tlb_start)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
	} else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			memblock_free(__pa(xen_io_tlb_start),
				      PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
					  verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

end:
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}
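
/*
 * Illustrative only, not part of this file: architecture setup code is
 * expected to call xen_swiotlb_init() once, either early during boot (where
 * a failure panics) or late, e.g. when a PCI device is first hotplugged into
 * a PV guest, where an error is returned instead:
 *
 *	rc = xen_swiotlb_init(1, false);
 *	if (rc)
 *		return rc;
 */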

static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's ideas of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(phys);
	if (((dev_addr + size - 1 <= dma_mask)) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
		SetPageXenRemapped(virt_to_page(ret));
	}
	memset(ret, 0, size);
	return ret;
}

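/*
 * Undo xen_swiotlb_alloc_coherent(): only regions that were actually
 * exchanged with the hypervisor (marked PageXenRemapped above) are handed
 * back with xen_destroy_contiguous_region() before the pages are freed.
 */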
static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return you the
	 * physical address */
	phys = xen_bus_to_phys(dev_addr);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
		     range_straddles_page_boundary(phys, size)) &&
	    TestClearPageXenRemapped(virt_to_page(vaddr)))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    swiotlb_force != SWIOTLB_FORCE)
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
				     size, size, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	dev_addr = xen_phys_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size))) {
		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

	page = pfn_to_page(map >> PAGE_SHIFT);
	offset = map & ~PAGE_MASK;
done:
	/*
	 * we are not interested in the dma_addr returned by xen_dma_map_page,
	 * only in the potential cache flushes executed by the function.
	 */
	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
	return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
}

static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}

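/*
 * For a bounced mapping a sync has two parts: the architecture-specific
 * cache maintenance (xen_dma_sync_single_*) and the swiotlb copy between
 * the bounce buffer and the original buffer.  For a CPU sync the cache
 * maintenance is done before the copy back to the caller's buffer; for a
 * device sync the bounce buffer is filled first and the cache maintenance
 * runs afterwards.
 */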
static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dma_addr);

	xen_dma_sync_single_for_cpu(dev, dma_addr, size, dir);

	if (is_xen_swiotlb_buffer(dma_addr))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
				   size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dma_addr);

	if (is_xen_swiotlb_buffer(dma_addr))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	xen_dma_sync_single_for_device(dev, dma_addr, size, dir);
}

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}

static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		   enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return 0;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
						sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
			       int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
						   sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 * This function should be called with the pages from the current domain only;
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
		     unsigned long attrs)
{
#ifdef CONFIG_ARM
	if (xen_get_dma_ops(dev)->mmap)
		return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
						  dma_addr, size, attrs);
#endif
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

/*
 * This function should be called with the pages from the current domain only;
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			unsigned long attrs)
{
#ifdef CONFIG_ARM
	if (xen_get_dma_ops(dev)->get_sgtable) {
#if 0
	/*
	 * This check verifies that the page belongs to the current domain and
	 * is not one mapped from another domain.
	 * This check is for debug only, and should not go to production build
	 */
		unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
		BUG_ON(!page_is_ram(bfn));
#endif
		return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
							 handle, size, attrs);
	}
#endif
	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = xen_swiotlb_dma_mmap,
	.get_sgtable = xen_swiotlb_get_sgtable,
};
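
/*
 * Illustrative only: drivers never call the entry points above directly.
 * Once this ops table is installed as a device's dma_ops, the generic DMA
 * API dispatches to it, e.g.
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		goto fail;
 *
 * ends up in xen_swiotlb_map_page(), bouncing through the Xen swiotlb when
 * the buffer is not machine-contiguous or not addressable by the device.
 */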