// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * PV guests under Xen run on a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa,
 * and also a mechanism for obtaining contiguous pages for device driver
 * operations (say, DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. Linux
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate page frame numbers (PFNs) to machine frame numbers (MFNs)
 * and vice versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
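
/*
 * Illustrative sketch of the PFN<->MFN lookup described above (not part
 * of the driver; it assumes the x86 PV helpers pfn_to_mfn()/mfn_to_pfn()
 * from <asm/xen/page.h>):
 *
 *	unsigned long pfn = PFN_DOWN(virt_to_phys(buf));
 *	unsigned long mfn = pfn_to_mfn(pfn);
 *
 * Nothing guarantees pfn_to_mfn(pfn + 1) == mfn + 1, so a buffer that is
 * contiguous in guest pseudo-physical memory may still be discontiguous
 * in machine memory and therefore unusable for DMA without the fixups
 * done below.
 */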

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
#define MAX_DMA_BITS 32
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated
 * by this API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32-bit when dma_addr_t is 64-bit, leading to a loss of
 * information if the shift is done before casting to 64 bits.
 */
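/*
 * Worked example of that loss (a sketch; assumes a 32-bit phys_addr_t,
 * a 64-bit dma_addr_t and XEN_PAGE_SHIFT == 12): for bfn == 0x100000,
 * the first frame at the 4GB boundary,
 *
 *	XEN_PFN_PHYS(bfn)                  -> truncated to 0 in 32 bits
 *	(dma_addr_t)bfn << XEN_PAGE_SHIFT  -> 0x100000000, as intended
 *
 * hence the helpers below cast to dma_addr_t first and shift afterwards.
 */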
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
	unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);

	next_bfn = pfn_to_bfn(xen_pfn);

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;

	return 0;
}

static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check addresses within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < MAX_DMA_BITS);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
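
/*
 * Worked example for the retry loop in xen_swiotlb_fixup() (assuming
 * IO_TLB_SEGSIZE == 128, IO_TLB_SHIFT == 11 and PAGE_SHIFT == 12): one
 * segment is 128 << 11 == 256 KiB and get_order(256 KiB) == 6, so
 * dma_bits starts at 6 + 12 == 18.  Every failed exchange widens the
 * address limit by one bit, up to MAX_DMA_BITS (32): Xen is first asked
 * for machine memory below 256 KiB and below 4GB only as a last resort.
 */
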
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);

	/*
	 * IO TLB memory already allocated. Just use it.
	 */
	if (io_tlb_start != 0) {
		xen_io_tlb_start = phys_to_virt(io_tlb_start);
		goto end;
	}

	/*
	 * Get IO TLB memory from any location.
	 */
	if (early) {
		xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
						  PAGE_SIZE);
		if (!xen_io_tlb_start)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
	} else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			memblock_free(__pa(xen_io_tlb_start),
				      PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
					  verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

end:
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}
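
/*
 * Usage sketch (not part of this file): architecture setup code calls
 * the init function once.  On x86 the PV setup does roughly this from
 * its swiotlb detection hook (the helper name is illustrative):
 *
 *	void __init pci_xen_swiotlb_init_sketch(void)
 *	{
 *		xen_swiotlb_init(1, true);	// verbose, early boot
 *	}
 */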

static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of the
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);
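	/* Worked example (assuming XEN_PAGE_SHIFT == 12): a 5000-byte
	 * request gives get_order(5000) == 1, so 1UL << (1 + 12) == 8192
	 * bytes are allocated here; xen_swiotlb_free_coherent() recomputes
	 * the same rounded size from the caller's original size.
	 */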

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(phys);
	if ((dev_addr + size - 1 <= dma_mask) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
		SetPageXenRemapped(virt_to_page(ret));
	}
	memset(ret, 0, size);
	return ret;
}

static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return the
	 * physical address */
	phys = xen_bus_to_phys(dev_addr);

	/* Convert the size to actually allocated. */
	size = 1UL << (order + XEN_PAGE_SHIFT);

	if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
		     range_straddles_page_boundary(phys, size)) &&
	    TestClearPageXenRemapped(virt_to_page(vaddr)))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or one of the xen_swiotlb_sync_single_* calls
 * is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size, true) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    swiotlb_force != SWIOTLB_FORCE)
		goto done;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys,
				     size, size, dir, attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	phys = map;
	dev_addr = xen_phys_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
					 attrs | DMA_ATTR_SKIP_CPU_SYNC);
		return DMA_MAPPING_ERROR;
	}

done:
	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		xen_dma_sync_for_device(dev_addr, phys, size, dir);
	return dev_addr;
}
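
/*
 * Driver-side sketch (hypothetical device; not part of this file):
 * streaming mappings reach xen_swiotlb_map_page() through the generic
 * DMA API, e.g.:
 *
 *	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	// ... let the device perform the transfer ...
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */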

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		xen_dma_sync_for_cpu(dev_addr, paddr, size, dir);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, size, dir, attrs);
}

static void
xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dma_addr);

	if (!dev_is_dma_coherent(dev))
		xen_dma_sync_for_cpu(dma_addr, paddr, size, dir);

	if (is_xen_swiotlb_buffer(dma_addr))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dma_addr);

	if (is_xen_swiotlb_buffer(dma_addr))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		xen_dma_sync_for_device(dma_addr, paddr, size, dir);
}

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg),
				dir, attrs);
}

static int
xen_swiotlb_map_sg(struct device *dev, struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		sg->dma_address = xen_swiotlb_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nelems;
out_unmap:
	xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	sg_dma_len(sgl) = 0;
	return 0;
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
			    int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_cpu(dev, sg->dma_address,
				sg->length, dir);
	}
}

static void
xen_swiotlb_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
			       int nelems, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i) {
		xen_swiotlb_sync_single_for_device(dev, sg->dma_address,
				sg->length, dir);
	}
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
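
/*
 * Driver-side sketch (hypothetical PCI device; not part of this file):
 * the 24-bit device from the comment above would trigger this check via
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(24)))
 *		return -EIO;	// even the bounce pool ends above 16MB
 *
 * and the mask is rejected whenever the end of the Xen IO TLB does not
 * fit below it.
 */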

const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg,
	.unmap_sg = xen_swiotlb_unmap_sg,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
};