// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
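
/*
 * For illustration only (hypothetical numbers): guest PFN 0x1000 might be
 * backed by MFN 0x87654, so guest-physical address 0x1000000 corresponds to
 * machine address 0x87654000, while PFN 0x1001 may be backed by a completely
 * unrelated MFN.
 */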

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32

/*
 * Translate between guest-physical (pseudo-physical), Xen machine (bus)
 * and device DMA addresses; used for quick lookup of the bus address of
 * the IOTLB.
 */

static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

	baddr |= paddr & ~XEN_PAGE_MASK;
	return baddr;
}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
					  phys_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
			    (baddr & ~XEN_PAGE_MASK);

	return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
					  dma_addr_t dma_addr)
{
	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}

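/*
 * Returns 1 when the machine frames backing [p, p + size) are not
 * consecutive, i.e. the region is contiguous in guest-physical space but
 * not in machine memory and therefore unusable for a single DMA transfer.
 */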
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}

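/*
 * A DMA address can only refer to a swiotlb bounce slot if the underlying
 * machine frame belongs to this domain, so translate back to a local PFN
 * before asking the core swiotlb code.
 */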
static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr)))
		return is_swiotlb_buffer(dev, paddr);
	return 0;
}

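/*
 * Exchange the pages backing the swiotlb buffer, one IO_TLB_SEGSIZE-sized
 * segment at a time, for machine-contiguous pages that the hypervisor
 * guarantees to lie below the requested DMA bit width.
 */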
static int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
	int rc;
	unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
	unsigned int i, dma_bits = order + PAGE_SHIFT;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	BUILD_BUG_ON(IO_TLB_SEGSIZE & (IO_TLB_SEGSIZE - 1));
	BUG_ON(nslabs % IO_TLB_SEGSIZE);

	i = 0;
	do {
		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT), order,
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += IO_TLB_SEGSIZE;
	} while (i < nslabs);
	return 0;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}

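/*
 * Late (post-boot) initialization: grab pages from anywhere, exchange them
 * for machine pages below 4GB via xen_swiotlb_fixup(), and retry with a
 * halved buffer if the hypervisor cannot satisfy the request.
 */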
int xen_swiotlb_init(void)
{
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned long bytes = swiotlb_size_or_default();
	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
	unsigned int order, repeat = 3;
	int rc = -ENOMEM;
	char *start;

	if (io_tlb_default_mem.nslabs) {
		pr_warn("swiotlb buffer already initialized\n");
		return -EEXIST;
	}

retry:
	m_ret = XEN_SWIOTLB_ENOMEM;
	order = get_order(bytes);

	/*
	 * Get IO TLB memory from any location.
	 */
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1 << 20) >> IO_TLB_SHIFT)
	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		start = (void *)xen_get_swiotlb_free_pages(order);
		if (start)
			break;
		order--;
	}
	if (!start)
		goto exit;
	if (order != get_order(bytes)) {
		pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
			(PAGE_SIZE << order) >> 20);
		nslabs = SLABS_PER_PAGE << order;
		bytes = nslabs << IO_TLB_SHIFT;
	}

	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(start, nslabs);
	if (rc) {
		free_pages((unsigned long)start, order);
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	rc = swiotlb_late_init_with_tbl(start, nslabs);
	if (rc)
		return rc;
	swiotlb_set_max_segment(PAGE_SIZE);
	return 0;
error:
	if (nslabs > 1024 && repeat--) {
		/* Min is 2MB: 1024 slabs of 1 << IO_TLB_SHIFT bytes each. */
		nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
		bytes = nslabs << IO_TLB_SHIFT;
		pr_info("Lowering to %luMB\n", bytes >> 20);
		goto retry;
	}
exit:
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	return rc;
}

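/*
 * Early (boot-time) variant for x86: memory comes from memblock rather than
 * the page allocator, and failures are fatal because DMA cannot work at all
 * without the bounce buffer.
 */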
#ifdef CONFIG_X86
void __init xen_swiotlb_init_early(void)
{
	unsigned long bytes = swiotlb_size_or_default();
	unsigned long nslabs = bytes >> IO_TLB_SHIFT;
	unsigned int repeat = 3;
	char *start;
	int rc;

retry:
	/*
	 * Get IO TLB memory from any location.
	 */
	start = memblock_alloc(PAGE_ALIGN(bytes),
			       IO_TLB_SEGSIZE << IO_TLB_SHIFT);
	if (!start)
		panic("%s: Failed to allocate %lu bytes\n",
		      __func__, PAGE_ALIGN(bytes));

	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(start, nslabs);
	if (rc) {
		memblock_free(start, PAGE_ALIGN(bytes));
		if (nslabs > 1024 && repeat--) {
			/* Min is 2MB: 1024 slabs of 1 << IO_TLB_SHIFT bytes each. */
			nslabs = max(1024UL, ALIGN(nslabs >> 1, IO_TLB_SEGSIZE));
			bytes = nslabs << IO_TLB_SHIFT;
			pr_info("Lowering to %luMB\n", bytes >> 20);
			goto retry;
		}
		panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
	}

	if (swiotlb_init_with_tbl(start, nslabs, true))
		panic("Cannot allocate SWIOTLB buffer");
	swiotlb_set_max_segment(PAGE_SIZE);
}
#endif /* CONFIG_X86 */

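/*
 * Coherent allocations must be machine-contiguous and fall within the
 * device's coherent DMA mask. If the pages Linux hands back do not already
 * satisfy that, exchange them with the hypervisor and mark the page so the
 * free path knows to undo the exchange.
 */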
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of the pseudo-phys
	 * memory layout has nothing to do with the machine physical layout.
	 * We can't allocate highmem because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Convert the size to the size that was actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the dma address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = dma_to_phys(hwdev, *dma_handle);
	dev_addr = xen_phys_to_dma(hwdev, phys);
	if ((dev_addr + size - 1 <= dma_mask) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret,
						(dma_addr_t)phys, attrs);
			return NULL;
		}
		*dma_handle = phys_to_dma(hwdev, *dma_handle);
		SetPageXenRemapped(virt_to_page(ret));
	}
	memset(ret, 0, size);
	return ret;
}

static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);
	struct page *page;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* Do not use virt_to_phys because on ARM it doesn't return the
	 * physical address. */
	phys = xen_dma_to_phys(hwdev, dev_addr);

	/* Convert the size to the size that was actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (is_vmalloc_addr(vaddr))
		page = vmalloc_to_page(vaddr);
	else
		page = virt_to_page(vaddr);

	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
		     range_straddles_page_boundary(phys, size)) &&
	    TestClearPageXenRemapped(page))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
				attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode. The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_sync_single_for_cpu is
 * performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    !is_swiotlb_force_bounce(dev))
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, phys, size, size, 0, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_dma(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble.
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, map, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
			arch_sync_dma_for_device(phys, size, dir);
		else
			xen_dma_sync_for_device(dev, dev_addr, size, dir);
	}
	return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation. The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call. All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
	}

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
}

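/*
 * Note the ordering below: when syncing for the CPU the cache maintenance
 * happens before data is copied out of a bounce buffer, while the
 * _for_device variant copies into the bounce buffer first and performs the
 * cache maintenance afterwards.
 */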
static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
	}

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_device(paddr, size, dir);
		else
			xen_dma_sync_for_device(dev, dma_addr, size, dir);
	}
}

/*
 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				dir, attrs);
}

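/*
 * Map a scatter-gather list by bouncing each segment individually; if any
 * segment fails, everything mapped so far is unmapped again and -EIO is
 * returned.
 */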
static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return -EIO;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
				sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
			       int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
				sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_phys_to_dma(hwdev, io_tlb_default_mem.end - 1) <= mask;
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
};