// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

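/*
 * Address translation helpers: a Xen "bus" address is the machine address
 * obtained from the PFN->BFN mapping, while the dma_addr_t variants
 * additionally run the result through phys_to_dma()/dma_to_phys() so any
 * architecture-specific DMA offset is applied on top of the Xen translation.
 */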
static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
        unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
        phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

        baddr |= paddr & ~XEN_PAGE_MASK;
        return baddr;
}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
        return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
                                          phys_addr_t baddr)
{
        unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
        phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
                            (baddr & ~XEN_PAGE_MASK);

        return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
                                          dma_addr_t dma_addr)
{
        return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}

static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
{
        return xen_phys_to_dma(dev, virt_to_phys(address));
}

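/*
 * Check whether the Xen page frames backing [p, p + size) are machine
 * contiguous: returns 1 as soon as two consecutive PFNs map to
 * non-consecutive BFNs, in which case the buffer cannot be handed to a
 * device as a single DMA region.
 */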
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
        unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
        unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

        next_bfn = pfn_to_bfn(xen_pfn);

        for (i = 1; i < nr_pages; i++)
                if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
                        return 1;

        return 0;
}

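/*
 * Check whether a DMA address lies inside the swiotlb-xen bounce buffer.
 * Only addresses that translate back to a local PFN are considered; see
 * the comment below about foreign addresses aliasing local ones.
 */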
static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
        unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
        unsigned long xen_pfn = bfn_to_local_pfn(bfn);
        phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
         * in our domain. Therefore _only_ check addresses within our domain.
         */
        if (pfn_valid(PFN_DOWN(paddr))) {
                return paddr >= virt_to_phys(xen_io_tlb_start) &&
                       paddr < virt_to_phys(xen_io_tlb_end);
        }
        return 0;
}

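/*
 * Exchange the pages backing the bounce buffer, one IO_TLB_SEGSIZE chunk
 * at a time, for machine-contiguous memory addressable within dma_bits.
 * If Xen cannot satisfy dma_bits, the mask is widened up to MAX_DMA_BITS
 * before the chunk is given up on.
 */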
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
        int i, rc;
        int dma_bits;
        dma_addr_t dma_handle;
        phys_addr_t p = virt_to_phys(buf);

        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

        i = 0;
        do {
                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

                do {
                        rc = xen_create_contiguous_region(
                                p + (i << IO_TLB_SHIFT),
                                get_order(slabs << IO_TLB_SHIFT),
                                dma_bits, &dma_handle);
                } while (rc && dma_bits++ < MAX_DMA_BITS);
                if (rc)
                        return rc;

                i += slabs;
        } while (i < nslabs);
        return 0;
}
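/*
 * Default to a 64MB IO TLB, rounded up to IO_TLB_SEGSIZE, unless a slab
 * count was already established (e.g. via the swiotlb= command line
 * option reported by swiotlb_nr_tbl()).
 */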
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
        if (!nr_tbl) {
                xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
                xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
        } else
                xen_io_tlb_nslabs = nr_tbl;

        return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
        XEN_SWIOTLB_UNKNOWN = 0,
        XEN_SWIOTLB_ENOMEM,
        XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
        switch (err) {
        case XEN_SWIOTLB_ENOMEM:
                return "Cannot allocate Xen-SWIOTLB buffer\n";
        case XEN_SWIOTLB_EFIXUP:
                return "Failed to get contiguous memory for DMA from Xen!\n"\
                        "You either: don't have the permissions, do not have"\
                        " enough free memory under 4GB, or the hypervisor memory"\
                        " is too fragmented!";
        default:
                break;
        }
        return "";
}
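/*
 * Set up the Xen swiotlb: allocate (or reuse) the bounce buffer, swap its
 * backing pages for machine-contiguous memory below 4GB, and register it
 * with the core swiotlb code.  On failure the requested size is halved and
 * the whole sequence retried a few times before giving up.
 */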
int __ref xen_swiotlb_init(int verbose, bool early)
{
        unsigned long bytes, order;
        int rc = -ENOMEM;
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned int repeat = 3;

        xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
        bytes = xen_set_nslabs(xen_io_tlb_nslabs);
        order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);

        /*
         * IO TLB memory already allocated. Just use it.
         */
        if (io_tlb_start != 0) {
                xen_io_tlb_start = phys_to_virt(io_tlb_start);
                goto end;
        }

        /*
         * Get IO TLB memory from any location.
         */
        if (early) {
                xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
                                                  PAGE_SIZE);
                if (!xen_io_tlb_start)
                        panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                              __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
        } else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
                while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                        xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
                        if (xen_io_tlb_start)
                                break;
                        order--;
                }
                if (order != get_order(bytes)) {
                        pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
                                (PAGE_SIZE << order) >> 20);
                        xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
                        bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
                }
        }
        if (!xen_io_tlb_start) {
                m_ret = XEN_SWIOTLB_ENOMEM;
                goto error;
        }
        /*
         * And replace that memory with pages under 4GB.
         */
        rc = xen_swiotlb_fixup(xen_io_tlb_start,
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
                if (early)
                        memblock_free(__pa(xen_io_tlb_start),
                                      PAGE_ALIGN(bytes));
                else {
                        free_pages((unsigned long)xen_io_tlb_start, order);
                        xen_io_tlb_start = NULL;
                }
                m_ret = XEN_SWIOTLB_EFIXUP;
                goto error;
        }
        if (early) {
                if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
                                          verbose))
                        panic("Cannot allocate SWIOTLB buffer");
                rc = 0;
        } else
                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

end:
        xen_io_tlb_end = xen_io_tlb_start + bytes;
        if (!rc)
                swiotlb_set_max_segment(PAGE_SIZE);

        return rc;
error:
        if (repeat--) {
                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
                                        (xen_io_tlb_nslabs >> 1));
                pr_info("Lowering to %luMB\n",
                        (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
                goto retry;
        }
        pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
        if (early)
                panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        else
                free_pages((unsigned long)xen_io_tlb_start, order);
        return rc;
}

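/*
 * Coherent allocations come from xen_alloc_coherent_pages(); if the result
 * is not machine-contiguous within the device's coherent DMA mask, it is
 * exchanged via xen_create_contiguous_region() and the page is marked
 * XenRemapped so xen_swiotlb_free_coherent() knows to undo the exchange.
 */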
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           unsigned long attrs)
{
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);
        phys_addr_t phys;
        dma_addr_t dev_addr;

        /*
         * Ignore region specifiers - the kernel's idea of the
         * pseudo-phys memory layout has nothing to do with the
         * machine physical layout.  We can't allocate highmem
         * because we can't return a pointer to it.
         */
        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

        /* Convert the size to actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);

        /* On ARM this function returns an ioremap'ped virtual address for
         * which virt_to_phys doesn't return the corresponding physical
         * address. In fact on ARM virt_to_phys only works for kernel direct
         * mapped RAM memory. Also see comment below.
         */
        ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

        if (!ret)
                return ret;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        /* At this point dma_handle is the dma address, next we are
         * going to set it to the machine address.
         * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
         * to *dma_handle. */
        phys = dma_to_phys(hwdev, *dma_handle);
        dev_addr = xen_phys_to_dma(hwdev, phys);
        if (((dev_addr + size - 1 <= dma_mask)) &&
            !range_straddles_page_boundary(phys, size))
                *dma_handle = dev_addr;
        else {
                if (xen_create_contiguous_region(phys, order,
                                                 fls64(dma_mask), dma_handle) != 0) {
                        xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
                        return NULL;
                }
                *dma_handle = phys_to_dma(hwdev, *dma_handle);
                SetPageXenRemapped(virt_to_page(ret));
        }
        memset(ret, 0, size);
        return ret;
}

static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                          dma_addr_t dev_addr, unsigned long attrs)
{
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);
        struct page *page;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        /* Do not use virt_to_phys because on ARM it doesn't return the
         * physical address. */
        phys = xen_dma_to_phys(hwdev, dev_addr);

        /* Convert the size to actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);

        if (is_vmalloc_addr(vaddr))
                page = vmalloc_to_page(vaddr);
        else
                page = virt_to_page(vaddr);

        if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
                     range_straddles_page_boundary(phys, size)) &&
            TestClearPageXenRemapped(page))
                xen_destroy_contiguous_region(phys, order);

        xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
                                attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * DMA address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                       unsigned long offset, size_t size,
                                       enum dma_data_direction dir,
                                       unsigned long attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size, true) &&
            !range_straddles_page_boundary(phys, size) &&
            !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
            swiotlb_force != SWIOTLB_FORCE)
                goto done;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

        map = swiotlb_tbl_map_single(dev, virt_to_phys(xen_io_tlb_start),
                                     phys, size, size, dir, attrs);
        if (map == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;

        phys = map;
        dev_addr = xen_phys_to_dma(dev, map);

        /*
         * Ensure that the address returned is DMA'ble
         */
        if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
                swiotlb_tbl_unmap_single(dev, map, size, size, dir,
                                attrs | DMA_ATTR_SKIP_CPU_SYNC);
                return DMA_MAPPING_ERROR;
        }

done:
        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
                        arch_sync_dma_for_device(phys, size, dir);
                else
                        xen_dma_sync_for_device(dev, dev_addr, size, dir);
        }
        return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
                        arch_sync_dma_for_cpu(paddr, size, dir);
                else
                        xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
        }

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(hwdev, dev_addr))
                swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
}

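/*
 * The sync helpers mirror map/unmap: non-coherent devices get architecture
 * cache maintenance (directly for local pages, via the xen_dma_sync_*
 * helpers for foreign ones), and addresses inside the Xen swiotlb also have
 * their bounce buffer synced in the appropriate direction.
 */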
static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
                size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

        if (!dev_is_dma_coherent(dev)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
                        arch_sync_dma_for_cpu(paddr, size, dir);
                else
                        xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
        }

        if (is_xen_swiotlb_buffer(dev, dma_addr))
                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
                size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

        if (is_xen_swiotlb_buffer(dev, dma_addr))
                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

        if (!dev_is_dma_coherent(dev)) {
                if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
                        arch_sync_dma_for_device(paddr, size, dir);
                else
                        xen_dma_sync_for_device(dev, dma_addr, size, dir);
        }
}

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
                                dir, attrs);

}

static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
                                sg->offset, sg->length, dir, attrs);
                if (sg->dma_address == DMA_MAPPING_ERROR)
                        goto out_unmap;
                sg_dma_len(sg) = sg->length;
        }

        return nelems;
out_unmap:
        xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
        sg_dma_len(sgl) = 0;
        return 0;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
                            int nelems, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i) {
                xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
                                sg->length, dir);
        }
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                               int nelems, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i) {
                xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
                                sg->length, dir);
        }
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return xen_virt_to_bus(hwdev, xen_io_tlb_end - 1) <= mask;
}

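/*
 * The dma_map_ops exported for Xen PV guests.  mmap, get_sgtable and the
 * alloc_pages/free_pages hooks fall through to the common implementations.
 */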
const struct dma_map_ops xen_swiotlb_dma_ops = {
        .alloc = xen_swiotlb_alloc_coherent,
        .free = xen_swiotlb_free_coherent,
        .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
        .sync_single_for_device = xen_swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
        .map_sg = xen_swiotlb_map_sg,
        .unmap_sg = xen_swiotlb_unmap_sg,
        .map_page = xen_swiotlb_map_page,
        .unmap_page = xen_swiotlb_unmap_page,
        .dma_supported = xen_swiotlb_dma_supported,
        .mmap = dma_common_mmap,
        .get_sgtable = dma_common_get_sgtable,
        .alloc_pages = dma_common_alloc_pages,
        .free_pages = dma_common_free_pages,
};