/*
 * Copyright 2010
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device driver operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore
 * memory is not contiguous. The Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 *
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

#define XEN_SWIOTLB_ERROR_CODE	(~(dma_addr_t)0x0)

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
/*
 * Quick lookup value of the bus address of the IOTLB.
 */

static u64 start_dma_addr;

/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32bit when dma_addr_t is 64bit leading to a loss in
 * information if the shift is done before casting to 64bit.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}
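
/*
 * Check whether the Xen page frames backing the range
 * [offset, offset + length) of the buffer that starts at @xen_pfn map to
 * consecutive machine (bus) frames, i.e. whether the buffer is contiguous
 * from the device's point of view.
 */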
static int check_pages_physically_contiguous(unsigned long xen_pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_bfn;
	int i;
	int nr_pages;

	next_bfn = pfn_to_bfn(xen_pfn);
	nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 0;
	}
	return 1;
}
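
/*
 * Return 1 if the physical range [p, p + size) crosses a Xen page boundary
 * and is not backed by machine-contiguous frames, in which case the device
 * cannot treat it as a single DMA region; return 0 otherwise.
 */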
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long xen_pfn = XEN_PFN_DOWN(p);
	unsigned int offset = p & ~XEN_PAGE_MASK;

	if (offset + size <= XEN_PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(xen_pfn, offset, size))
		return 0;
	return 1;
}

static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check addresses within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

static int max_dma_bits = 32;
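
/*
 * Exchange the pseudo-physical pages backing the bounce buffer, one
 * IO_TLB_SEGSIZE-sized chunk at a time, for machine pages that are
 * contiguous and addressable within dma_bits, widening the mask up to
 * max_dma_bits if the hypervisor cannot satisfy the narrower request.
 */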
static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
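
/*
 * Pick the number of IO TLB slabs: use the caller-supplied value (as set up
 * earlier by the generic swiotlb code) if there is one, otherwise default
 * to 64MB worth of slabs, aligned to a whole IO_TLB_SEGSIZE segment.
 * Returns the resulting buffer size in bytes.
 */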
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"\
		    "You either: don't have the permissions, do not have"\
		    " enough free memory under 4GB, or the hypervisor memory"\
		    " is too fragmented!";
	default:
		break;
	}
	return "";
}
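
/*
 * Allocate and set up the Xen software IO TLB: grab a buffer (from bootmem
 * when called early, from the page allocator otherwise), swap it for
 * machine memory below 4GB via xen_swiotlb_fixup(), and hand it to the
 * generic swiotlb code. On failure the buffer size is halved (down to a
 * 2MB minimum) and the whole sequence is retried a few times.
 */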
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
	/*
	 * Get IO TLB memory from any location.
	 */
	if (early)
		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
	else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
			 verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}
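
/*
 * Allocate DMA-coherent memory. The pages come back from
 * xen_alloc_coherent_pages() in pseudo-physical space; if they already sit
 * below the device's coherent DMA mask and are machine-contiguous they are
 * used as-is, otherwise they are exchanged with the hypervisor for a
 * suitable machine-contiguous region.
 */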
static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	* Ignore region specifiers - the kernel's idea of the
	* pseudo-phys memory layout has nothing to do with the
	* machine physical layout.  We can't allocate highmem
	* because we can't return a pointer to it.
	*/
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the corresponding physical
	 * address. In fact on ARM virt_to_phys only works for kernel direct
	 * mapped RAM memory. Also see comment below.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(phys);
	if (((dev_addr + size - 1 <= dma_mask)) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
	}
	memset(ret, 0, size);
	return ret;
}

static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return the
	 * physical address */
	phys = xen_bus_to_phys(dev_addr);

	if (((dev_addr + size - 1 > dma_mask)) ||
	    range_straddles_page_boundary(phys, size))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    (swiotlb_force != SWIOTLB_FORCE)) {
		/* we are not interested in the dma_addr returned by
		 * xen_dma_map_page, only in the potential cache flushes executed
		 * by the function. */
		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
		return dev_addr;
	}

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
				     attrs);
	if (map == SWIOTLB_MAP_ERROR)
		return XEN_SWIOTLB_ERROR_CODE;

	dev_addr = xen_phys_to_bus(map);
	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
					dev_addr, map & ~PAGE_MASK, size, dir, attrs);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (dma_capable(dev, dev_addr, size))
		return dev_addr;

	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

	return XEN_SWIOTLB_ERROR_CODE;
}

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with a highmem page, but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir,
			    unsigned long attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (target == SYNC_FOR_CPU)
		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

	if (target == SYNC_FOR_DEVICE)
		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
static void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);

}

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
static int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force == SWIOTLB_FORCE ||
		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
								 start_dma_addr,
								 sg_phys(sg),
								 sg->length,
								 dir, attrs);
			if (map == SWIOTLB_MAP_ERROR) {
				dev_warn(hwdev, "swiotlb buffer is full\n");
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			dev_addr = xen_phys_to_bus(map);
			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
						dev_addr,
						map & ~PAGE_MASK,
						sg->length,
						dir,
						attrs);
			sg->dma_address = dev_addr;
		} else {
			/* we are not interested in the dma_addr returned by
			 * xen_dma_map_page, only in the potential cache flushes executed
			 * by the function. */
			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
						dev_addr,
						paddr & ~PAGE_MASK,
						sg->length,
						dir,
						attrs);
			sg->dma_address = dev_addr;
		}
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg_dma_len(sg), dir, target);
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 * This function should be called with the pages from the current domain only;
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
		     unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (xen_get_dma_ops(dev)->mmap)
		return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
						    dma_addr, size, attrs);
#endif
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

/*
 * This function should be called with the pages from the current domain only;
 * passing pages mapped from other domains would lead to memory corruption.
 */
static int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (xen_get_dma_ops(dev)->get_sgtable) {
#if 0
	/*
	 * This check verifies that the page belongs to the current domain and
	 * is not one mapped from another domain.
	 * This check is for debug only and should not go into a production build.
	 */
		unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
		BUG_ON (!page_is_ram(bfn));
#endif
		return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
							   handle, size, attrs);
	}
#endif
	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
}

static int xen_swiotlb_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == XEN_SWIOTLB_ERROR_CODE;
}
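
/*
 * DMA operations published by this file. The architecture-specific Xen
 * setup code installs this table as the dma_map_ops for devices that must
 * go through the Xen software IO TLB, so streaming and coherent DMA API
 * calls for those devices end up in the functions above.
 */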
const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = xen_swiotlb_dma_mmap,
	.get_sgtable = xen_swiotlb_get_sgtable,
	.mapping_error = xen_swiotlb_mapping_error,
};