// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa, and
 * also providing a mechanism to obtain contiguous pages for device driver
 * operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
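
/*
 * Illustrative sketch only (not part of the driver logic): roughly how the
 * helpers below turn a guest physical address into a bus (DMA) address,
 * assuming XEN_PAGE_SHIFT of 12 and made-up frame numbers.  Given
 * paddr = 0x12345678, the Xen page frame is paddr >> 12 = 0x12345;
 * pfn_to_bfn() looks up the backing machine frame (say 0xabcde), and the
 * bus address becomes (0xabcde << 12) | (paddr & 0xfff) = 0xabcde678.
 * Only the frame number is translated; the offset within the page is kept.
 */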

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32

/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;

	baddr |= paddr & ~XEN_PAGE_MASK;
	return baddr;
}

static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
}

static inline phys_addr_t xen_bus_to_phys(struct device *dev,
					  phys_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
			(baddr & ~XEN_PAGE_MASK);

	return paddr;
}

static inline phys_addr_t xen_dma_to_phys(struct device *dev,
					  dma_addr_t dma_addr)
{
	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}

static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check addresses within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr)))
		return is_swiotlb_buffer(paddr);
	return 0;
}

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"\
		       "You either: don't have the permissions, do not have"\
		       " enough free memory under 4GB, or the hypervisor memory"\
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}

#define DEFAULT_NSLABS ALIGN(SZ_64M >> IO_TLB_SHIFT, IO_TLB_SEGSIZE)

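/*
 * Worked example (illustrative only, assuming IO_TLB_SHIFT of 11, i.e. 2 KB
 * slabs, and IO_TLB_SEGSIZE of 128): DEFAULT_NSLABS evaluates to
 * ALIGN(SZ_64M >> 11, 128) = ALIGN(32768, 128) = 32768 slabs, which is a
 * 64 MB bounce-buffer pool when no swiotlb= override is given.
 */
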
int __ref xen_swiotlb_init(void)
{
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned long nslabs, bytes, order;
	unsigned int repeat = 3;
	int rc = -ENOMEM;
	char *start;

	nslabs = swiotlb_nr_tbl();
	if (!nslabs)
		nslabs = DEFAULT_NSLABS;
retry:
	m_ret = XEN_SWIOTLB_ENOMEM;
	bytes = nslabs << IO_TLB_SHIFT;
	order = get_order(bytes);

	/*
	 * Get IO TLB memory from any location.
	 */
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		start = (void *)xen_get_swiotlb_free_pages(order);
		if (start)
			break;
		order--;
	}
	if (!start)
		goto error;
	if (order != get_order(bytes)) {
		pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
			(PAGE_SIZE << order) >> 20);
		nslabs = SLABS_PER_PAGE << order;
		bytes = nslabs << IO_TLB_SHIFT;
	}

	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(start, bytes, nslabs);
	if (rc) {
		free_pages((unsigned long)start, order);
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	rc = swiotlb_late_init_with_tbl(start, nslabs);
	if (rc)
		return rc;
	swiotlb_set_max_segment(PAGE_SIZE);
	return 0;
error:
	if (repeat--) {
		/* Min is 2MB */
		nslabs = max(1024UL, (nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	free_pages((unsigned long)start, order);
	return rc;
}

#ifdef CONFIG_X86
void __init xen_swiotlb_init_early(void)
{
	unsigned long nslabs, bytes;
	unsigned int repeat = 3;
	char *start;
	int rc;

	nslabs = swiotlb_nr_tbl();
	if (!nslabs)
		nslabs = DEFAULT_NSLABS;
retry:
	/*
	 * Get IO TLB memory from any location.
	 */
	bytes = nslabs << IO_TLB_SHIFT;
	start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
	if (!start)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);

	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(start, bytes, nslabs);
	if (rc) {
		memblock_free(__pa(start), PAGE_ALIGN(bytes));
		if (repeat--) {
			/* Min is 2MB */
			nslabs = max(1024UL, (nslabs >> 1));
			pr_info("Lowering to %luMB\n",
				(nslabs << IO_TLB_SHIFT) >> 20);
			goto retry;
		}
		panic("%s (rc:%d)", xen_swiotlb_error(XEN_SWIOTLB_EFIXUP), rc);
	}

	if (swiotlb_init_with_tbl(start, nslabs, false))
		panic("Cannot allocate SWIOTLB buffer");
	swiotlb_set_max_segment(PAGE_SIZE);
}
#endif /* CONFIG_X86 */

static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Convert the size to the actually allocated size. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	/* On ARM this function returns an ioremapped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the DMA address; next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = dma_to_phys(hwdev, *dma_handle);
	dev_addr = xen_phys_to_dma(hwdev, phys);
	if (((dev_addr + size - 1 <= dma_mask)) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
		*dma_handle = phys_to_dma(hwdev, *dma_handle);
		SetPageXenRemapped(virt_to_page(ret));
	}
	memset(ret, 0, size);
	return ret;
}

static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);
	struct page *page;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return
	 * the physical address */
	phys = xen_dma_to_phys(hwdev, dev_addr);

	/* Convert the size to the actually allocated size. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (is_vmalloc_addr(vaddr))
		page = vmalloc_to_page(vaddr);
	else
		page = virt_to_page(vaddr);

	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
		     range_straddles_page_boundary(phys, size)) &&
	    TestClearPageXenRemapped(page))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
				attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * DMA address to use is returned.
 *
 * Once the device is given the DMA address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    swiotlb_force != SWIOTLB_FORCE)
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_dma(dev, map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, map, size, dir,
				attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
			arch_sync_dma_for_device(phys, size, dir);
		else
			xen_dma_sync_for_device(dev, dev_addr, size, dir);
	}
	return dev_addr;
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
	}

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
}

static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
	}

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);

	if (is_xen_swiotlb_buffer(dev, dma_addr))
		swiotlb_sync_single_for_device(dev, paddr, size, dir);

	if (!dev_is_dma_coherent(dev)) {
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_device(paddr, size, dir);
		else
			xen_dma_sync_for_device(dev, dma_addr, size, dir);
	}
}

/*
 * Unmap a set of streaming mode DMA translations.  Again, CPU read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				dir, attrs);
}

static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return 0;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
				sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
			       int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
				sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
};
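
/*
 * Usage sketch (illustrative only, not part of this file): a PV guest driver
 * never calls these ops directly.  It uses the generic DMA API, and the
 * architecture setup code points the device at xen_swiotlb_dma_ops, e.g.:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	// ... device performs DMA against "handle" ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * dma_map_single() then ends up in xen_swiotlb_map_page() above, which either
 * hands back the machine address directly or bounces through the swiotlb pool.
 */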