// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual ones and vice-versa, and also
 * for providing a mechanism to have contiguous pages for device driver
 * operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * together from different pools, which means there is no guarantee that
 * PFN==MFN and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode)
 * are allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32

/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
        unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
        phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

        baddr |= paddr & ~XEN_PAGE_MASK;
        return baddr;
}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
                                          phys_addr_t baddr)
{
        unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
        phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
                            (baddr & ~XEN_PAGE_MASK);

        return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
                                          dma_addr_t dma_addr)
{
        return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
        unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
        unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

        next_bfn = pfn_to_bfn(xen_pfn);

        for (i = 1; i < nr_pages; i++)
                if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
                        return 1;

        return 0;
}

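/*
 * Example (hypothetical frame numbers): an 8 KiB buffer starting at guest
 * frame 0x10 spans frames 0x10 and 0x11.  If pfn_to_bfn(0x10) returned
 * 0x200 but pfn_to_bfn(0x11) returned 0x7f3, the buffer would be contiguous
 * in pseudo-physical space yet not in machine space, so the check above
 * returns 1 and callers must bounce the buffer or exchange the frames.
 */
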
static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
        unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
        unsigned long xen_pfn = bfn_to_local_pfn(bfn);
        phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
         * in our domain. Therefore _only_ check address within our domain.
         */
        if (pfn_valid(PFN_DOWN(paddr)))
                return is_swiotlb_buffer(paddr);
        return 0;
}

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
        int i, rc;
        int dma_bits;
        dma_addr_t dma_handle;
        phys_addr_t p = virt_to_phys(buf);

        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

        i = 0;
        do {
                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

                do {
                        rc = xen_create_contiguous_region(
                                p + (i << IO_TLB_SHIFT),
                                get_order(slabs << IO_TLB_SHIFT),
                                dma_bits, &dma_handle);
                } while (rc && dma_bits++ < MAX_DMA_BITS);
                if (rc)
                        return rc;

                i += slabs;
        } while (i < nslabs);
        return 0;
}

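/*
 * The fixup above exchanges the frames backing the IO TLB, up to
 * IO_TLB_SEGSIZE slabs at a time, for machine-contiguous frames
 * addressable below 2^dma_bits.  When the hypervisor cannot satisfy a
 * request, the inner loop retries with a wider address restriction, one
 * bit at a time, up to MAX_DMA_BITS before giving up.
 */
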
enum xen_swiotlb_err {
        XEN_SWIOTLB_UNKNOWN = 0,
        XEN_SWIOTLB_ENOMEM,
        XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
        switch (err) {
        case XEN_SWIOTLB_ENOMEM:
                return "Cannot allocate Xen-SWIOTLB buffer\n";
        case XEN_SWIOTLB_EFIXUP:
                return "Failed to get contiguous memory for DMA from Xen!\n"
                       "You either: don't have the permissions, do not have"
                       " enough free memory under 4GB, or the hypervisor memory"
                       " is too fragmented!";
        default:
                break;
        }
        return "";
}

#define DEFAULT_NSLABS          ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)
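/*
 * With the usual IO_TLB_SHIFT of 11 (2 KiB slabs) this works out to
 * SZ_64M >> 11 == 32768 slabs, i.e. a 64 MB default IO TLB; the
 * IO_TLB_SEGSIZE (128) alignment is already satisfied by that value.
 */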

int __ref xen_swiotlb_init(void)
{
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned long nslabs, bytes, order;
        unsigned int repeat = 3;
        int rc = -ENOMEM;
        char *start;

        nslabs = swiotlb_nr_tbl();
        if (!nslabs)
                nslabs = DEFAULT_NSLABS;
retry:
        m_ret = XEN_SWIOTLB_ENOMEM;
        bytes = nslabs << IO_TLB_SHIFT;
        order = get_order(bytes);

        /*
         * Get IO TLB memory from any location.
         */
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                start = (void *)xen_get_swiotlb_free_pages(order);
                if (start)
                        break;
                order--;
        }
        if (!start)
                goto error;
        if (order != get_order(bytes)) {
                pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
                        (PAGE_SIZE << order) >> 20);
                nslabs = SLABS_PER_PAGE << order;
                bytes = nslabs << IO_TLB_SHIFT;
        }

        /*
         * And replace that memory with pages under 4GB.
         */
        rc = xen_swiotlb_fixup(start, bytes, nslabs);
        if (rc) {
                free_pages((unsigned long)start, order);
                m_ret = XEN_SWIOTLB_EFIXUP;
                goto error;
        }
        rc = swiotlb_late_init_with_tbl(start, nslabs);
        if (rc)
                return rc;
        swiotlb_set_max_segment(PAGE_SIZE);
        return 0;
error:
        if (repeat--) {
                /* Min is 2MB */
                nslabs = max(1024UL, (nslabs >> 1));
                pr_info("Lowering to %luMB\n",
                        (nslabs << IO_TLB_SHIFT) >> 20);
                goto retry;
        }
        pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
        free_pages((unsigned long)start, order);
        return rc;
}

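/*
 * The retry path above halves nslabs (never below 1024 slabs, i.e. 2 MB)
 * on each failure, so a 64 MB request falls back to 32 MB, 16 MB and
 * finally 8 MB across the three permitted repeats before the error is
 * reported.
 */
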
#ifdef CONFIG_X86
void __init xen_swiotlb_init_early(void)
{
        unsigned long nslabs, bytes;
        unsigned int repeat = 3;
        char *start;
        int rc;

        nslabs = swiotlb_nr_tbl();
        if (!nslabs)
                nslabs = DEFAULT_NSLABS;
retry:
        /*
         * Get IO TLB memory from any location.
         */
        bytes = nslabs << IO_TLB_SHIFT;
        start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
        if (!start)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);

        /*
         * And replace that memory with pages under 4GB.
         */
        rc = xen_swiotlb_fixup(start, bytes, nslabs);
        if (rc) {
                memblock_free(__pa(start), PAGE_ALIGN(bytes));
                if (repeat--) {
                        /* Min is 2MB */
                        nslabs = max(1024UL, (nslabs >> 1));
                        pr_info("Lowering to %luMB\n",
                                (nslabs << IO_TLB_SHIFT) >> 20);
                        goto retry;
                }
                panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
        }

        if (swiotlb_init_with_tbl(start, nslabs, false))
                panic("Cannot allocate SWIOTLB buffer");
        swiotlb_set_max_segment(PAGE_SIZE);
}
#endif /* CONFIG_X86 */

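/*
 * Note the split between the two init paths: xen_swiotlb_init_early()
 * runs before the page allocator is up and must take its buffer from
 * memblock, panicking on failure, while xen_swiotlb_init() runs late
 * enough to use the page allocator and can report failure with an errno
 * instead.
 */
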
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           unsigned long attrs)
{
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);
        phys_addr_t phys;
        dma_addr_t dev_addr;

        /*
         * Ignore region specifiers - the kernel's idea of
         * pseudo-phys memory layout has nothing to do with the
         * machine physical layout.  We can't allocate highmem
         * because we can't return a pointer to it.
         */
        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

        /* Convert the size to the size actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);

        /* On ARM this function returns an ioremap'ped virtual address for
         * which virt_to_phys doesn't return the corresponding physical
         * address. In fact on ARM virt_to_phys only works for kernel direct
         * mapped RAM memory. Also see comment below.
         */
        ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

        if (!ret)
                return ret;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        /* At this point dma_handle is the dma address, next we are
         * going to set it to the machine address.
         * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
         * to *dma_handle. */
        phys = dma_to_phys(hwdev, *dma_handle);
        dev_addr = xen_phys_to_dma(hwdev, phys);
        if ((dev_addr + size - 1 <= dma_mask) &&
            !range_straddles_page_boundary(phys, size))
                *dma_handle = dev_addr;
        else {
                if (xen_create_contiguous_region(phys, order,
                                                 fls64(dma_mask), dma_handle) != 0) {
                        xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
                        return NULL;
                }
                *dma_handle = phys_to_dma(hwdev, *dma_handle);
                SetPageXenRemapped(virt_to_page(ret));
        }
        memset(ret, 0, size);
        return ret;
}

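/*
 * A coherent allocation must look contiguous to the device, so when the
 * buffer either exceeds the coherent DMA mask or straddles a machine page
 * boundary, the allocator above exchanges the backing frames for a
 * machine-contiguous run and marks the page PageXenRemapped so the free
 * path below knows to undo the exchange.
 */
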
static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                          dma_addr_t dev_addr, unsigned long attrs)
{
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);
        struct page *page;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        /* Do not use virt_to_phys because on ARM it doesn't return the
         * physical address. */
        phys = xen_dma_to_phys(hwdev, dev_addr);

        /* Convert the size to the size actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);

        if (is_vmalloc_addr(vaddr))
                page = vmalloc_to_page(vaddr);
        else
                page = virt_to_page(vaddr);

        if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
                     range_straddles_page_boundary(phys, size)) &&
            TestClearPageXenRemapped(page))
                xen_destroy_contiguous_region(phys, order);

        xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
                                attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_sync_single_for_cpu is
 * performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                       unsigned long offset, size_t size,
                                       enum dma_data_direction dir,
                                       unsigned long attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size, true) &&
            !range_straddles_page_boundary(phys, size) &&
            !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
            swiotlb_force != SWIOTLB_FORCE)
                goto done;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

        map = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs);
        if (map == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;

        phys = map;
        dev_addr = xen_phys_to_dma(dev, map);

        /*
         * Ensure that the address returned is DMA'ble.
         */
        if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
                swiotlb_tbl_unmap_single(dev, map, size, dir,
                                         attrs | DMA_ATTR_SKIP_CPU_SYNC);
                return DMA_MAPPING_ERROR;
        }

done:
        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
                        arch_sync_dma_for_device(phys, size, dir);
                else
                        xen_dma_sync_for_device(dev, dev_addr, size, dir);
        }
        return dev_addr;
}

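/*
 * Minimal usage sketch (illustrative only, not part of this file):
 * drivers never call xen_swiotlb_map_page() directly; they go through
 * the generic DMA API, which dispatches via xen_swiotlb_dma_ops below:
 *
 *      dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *      ... hand "handle" to the device and wait for completion ...
 *      dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */
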
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
                        arch_sync_dma_for_cpu(paddr, size, dir);
                else
                        xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
        }

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(hwdev, dev_addr))
                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
}
439
Konrad Rzeszutek Wilkb097186f2010-05-11 10:05:49 -0400440static void
Christoph Hellwig2e12dce2019-04-11 09:19:59 +0200441xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
442 size_t size, enum dma_data_direction dir)
Konrad Rzeszutek Wilkb097186f2010-05-11 10:05:49 -0400443{
Stefano Stabellini91ffe4a2020-07-10 15:34:25 -0700444 phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
Konrad Rzeszutek Wilkb097186f2010-05-11 10:05:49 -0400445
Stefano Stabellini63f06202020-07-10 15:34:26 -0700446 if (!dev_is_dma_coherent(dev)) {
447 if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
448 arch_sync_dma_for_cpu(paddr, size, dir);
449 else
450 xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
451 }
Konrad Rzeszutek Wilkb097186f2010-05-11 10:05:49 -0400452
Stefano Stabellini38ba51d2020-07-10 15:34:23 -0700453 if (is_xen_swiotlb_buffer(dev, dma_addr))
Christoph Hellwig80808d22021-03-01 08:44:26 +0100454 swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
Konrad Rzeszutek Wilkb097186f2010-05-11 10:05:49 -0400455}
456
Christoph Hellwig2e12dce2019-04-11 09:19:59 +0200457static void
458xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
459 size_t size, enum dma_data_direction dir)
Konrad Rzeszutek Wilkb097186f2010-05-11 10:05:49 -0400460{
Stefano Stabellini91ffe4a2020-07-10 15:34:25 -0700461 phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
Konrad Rzeszutek Wilkb097186f2010-05-11 10:05:49 -0400462
Stefano Stabellini38ba51d2020-07-10 15:34:23 -0700463 if (is_xen_swiotlb_buffer(dev, dma_addr))
Christoph Hellwig80808d22021-03-01 08:44:26 +0100464 swiotlb_sync_single_for_device(dev, paddr, size, dir);
Christoph Hellwig2e12dce2019-04-11 09:19:59 +0200465
Stefano Stabellini63f06202020-07-10 15:34:26 -0700466 if (!dev_is_dma_coherent(dev)) {
467 if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
468 arch_sync_dma_for_device(paddr, size, dir);
469 else
470 xen_dma_sync_for_device(dev, dma_addr, size, dir);
471 }
Konrad Rzeszutek Wilkb097186f2010-05-11 10:05:49 -0400472}
Christoph Hellwigdceb1a62017-05-21 13:15:13 +0200473
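/*
 * Note the mirrored ordering in the two helpers above: the for-cpu
 * variant does cache maintenance before copying the bounce buffer back,
 * so the CPU reads what the device actually wrote, while the for-device
 * variant copies into the bounce buffer first and does cache maintenance
 * afterwards, so the device sees what the CPU actually wrote.
 */
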
/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
                                dir, attrs);
}

static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
                                sg->offset, sg->length, dir, attrs);
                if (sg->dma_address == DMA_MAPPING_ERROR)
                        goto out_unmap;
                sg_dma_len(sg) = sg->length;
        }

        return nelems;
out_unmap:
        xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
        sg_dma_len(sgl) = 0;
        return 0;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
                            int nelems, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i) {
                xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
                                sg->length, dir);
        }
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                               int nelems, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i) {
                xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
                                sg->length, dir);
        }
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return xen_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
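
/*
 * For example, passing DMA_BIT_MASK(32) succeeds only if the end of the
 * fixed-up IO TLB sits below the 4 GB boundary in DMA address space,
 * which is what xen_swiotlb_fixup() attempts to arrange.
 */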

const struct dma_map_ops xen_swiotlb_dma_ops = {
        .alloc = xen_swiotlb_alloc_coherent,
        .free = xen_swiotlb_free_coherent,
        .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
        .sync_single_for_device = xen_swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
        .map_sg = xen_swiotlb_map_sg,
        .unmap_sg = xen_swiotlb_unmap_sg,
        .map_page = xen_swiotlb_map_page,
        .unmap_page = xen_swiotlb_unmap_page,
        .dma_supported = xen_swiotlb_dma_supported,
        .mmap = dma_common_mmap,
        .get_sgtable = dma_common_get_sgtable,
        .alloc_pages = dma_common_alloc_pages,
        .free_pages = dma_common_free_pages,
};
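
/*
 * Installation sketch (illustrative, not part of this file): when running
 * as a Xen domain, the architecture setup code points devices at this
 * table, after which the generic DMA API routes through the functions
 * above, e.g.:
 *
 *      set_dma_ops(dev, &xen_swiotlb_dma_ops);
 */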