// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>
#include <linux/swiotlb.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
        return sev_active();
}

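/*
 * Report an address that could not be mapped: print a one-time error
 * describing the failed mapping and back it up with a one-time backtrace
 * via WARN_ON_ONCE().
 */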
static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
{
        if (!dev->dma_mask) {
                dev_err_once(dev, "DMA map on device without dma_mask\n");
        } else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
                dev_err_once(dev,
                        "overflow %pad+%zu of DMA mask %llx bus mask %llx\n",
                        &dma_addr, size, *dev->dma_mask, dev->bus_dma_mask);
        }
        WARN_ON_ONCE(1);
}

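/*
 * Translate a physical address to a DMA address.  When all DMA has to
 * target unencrypted memory (SEV), use the raw translation that does not
 * include the memory encryption bit.
 */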
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
                phys_addr_t phys)
{
        if (force_dma_unencrypted())
                return __phys_to_dma(dev, phys);
        return phys_to_dma(dev, phys);
}

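/*
 * Return the smallest mask covering the highest directly addressable
 * physical page, clamped to the bus DMA mask if one is set and rounded up
 * to a power of two minus one.
 */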
u64 dma_direct_get_required_mask(struct device *dev)
{
        u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);

        if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
                max_dma = dev->bus_dma_mask;

        return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

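/*
 * Pick the GFP zone flag (GFP_DMA, GFP_DMA32 or none) matching the
 * physical address range implied by the DMA mask, and report the
 * corresponding physical address limit through *phys_mask.
 */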
static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
                u64 *phys_mask)
{
        if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
                dma_mask = dev->bus_dma_mask;

        if (force_dma_unencrypted())
                *phys_mask = __dma_to_phys(dev, dma_mask);
        else
                *phys_mask = dma_to_phys(dev, dma_mask);

        /*
         * Optimistically try the zone that the physical address mask falls
         * into first.  If that returns memory that isn't actually addressable
         * we will fall back to the next lower zone and try again.
         *
         * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
         * zones.
         */
        if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
                return GFP_DMA;
        if (*phys_mask <= DMA_BIT_MASK(32))
                return GFP_DMA32;
        return 0;
}

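/*
 * Check that the whole allocation is addressable under both the coherent
 * DMA mask and the bus DMA mask.
 */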
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
        return phys_to_dma_direct(dev, phys) + size - 1 <=
                min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}

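/*
 * Allocate pages that satisfy the coherent DMA mask: prefer CMA when the
 * context allows blocking, and retry in progressively lower zones
 * (ZONE_DMA32, then ZONE_DMA) if the pages obtained turn out not to be
 * addressable.
 */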
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        int page_order = get_order(size);
        struct page *page = NULL;
        u64 phys_mask;

        if (attrs & DMA_ATTR_NO_WARN)
                gfp |= __GFP_NOWARN;

        /* we always manually zero the memory once we are done: */
        gfp &= ~__GFP_ZERO;
        gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                        &phys_mask);
again:
        /* CMA can only be used in a context that permits sleeping */
        if (gfpflags_allow_blocking(gfp)) {
                page = dma_alloc_from_contiguous(dev, count, page_order,
                                gfp & __GFP_NOWARN);
                if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                        dma_release_from_contiguous(dev, page, count);
                        page = NULL;
                }
        }
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                __free_pages(page, page_order);
                page = NULL;

                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    phys_mask < DMA_BIT_MASK(64) &&
                    !(gfp & (GFP_DMA32 | GFP_DMA))) {
                        gfp |= GFP_DMA32;
                        goto again;
                }

                if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }
        }

        return page;
}

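/*
 * Allocate zeroed, directly mapped memory and return its kernel virtual
 * address, with the DMA address returned through *dma_handle.  When DMA
 * must be unencrypted (SEV) the memory is marked decrypted first.
 */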
void *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        struct page *page;
        void *ret;

        page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
        if (!page)
                return NULL;

        if (PageHighMem(page)) {
                /*
                 * Depending on the cma= arguments and per-arch setup,
                 * dma_alloc_from_contiguous() could return highmem pages.
                 * Without remapping there is no way to return them here,
                 * so log an error and fail.
                 */
                dev_info(dev, "Rejecting highmem page from CMA.\n");
                __dma_direct_free_pages(dev, size, page);
                return NULL;
        }

        ret = page_address(page);
        if (force_dma_unencrypted()) {
                set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
                *dma_handle = __phys_to_dma(dev, page_to_phys(page));
        } else {
                *dma_handle = phys_to_dma(dev, page_to_phys(page));
        }
        memset(ret, 0, size);
        return ret;
}

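/* Return pages to CMA if they came from there, else to the page allocator. */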
void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

        if (!dma_release_from_contiguous(dev, page, count))
                __free_pages(page, get_order(size));
}

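/*
 * Free memory obtained from dma_direct_alloc_pages(), re-encrypting it
 * first if it had been marked decrypted for SEV.
 */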
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
{
        unsigned int page_order = get_order(size);

        if (force_dma_unencrypted())
                set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
        __dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
}

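/*
 * Allocation entry points used by the dma-mapping core: non-coherent
 * devices are handed to the architecture's arch_dma_alloc()/arch_dma_free(),
 * everything else uses the direct page allocation above.
 */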
void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        if (!dev_is_dma_coherent(dev))
                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
        return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
        if (!dev_is_dma_coherent(dev))
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
        else
                dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}

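/*
 * "for device" synchronisation: sync the SWIOTLB bounce buffer if the
 * address falls inside one, then let the architecture do its for-device
 * cache maintenance for non-coherent devices.
 */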
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = dma_to_phys(dev, addr);

        if (unlikely(is_swiotlb_buffer(paddr)))
                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_device(dev, paddr, size, dir);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_device);

void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
                        swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
                                        dir, SYNC_FOR_DEVICE);

                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
                                        dir);
        }
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_device);
#endif

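/*
 * "for cpu" synchronisation: perform the architecture's for-CPU cache
 * maintenance for non-coherent devices and sync the SWIOTLB bounce buffer
 * where one is in use.
 */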
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_cpu(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
        phys_addr_t paddr = dma_to_phys(dev, addr);

        if (!dev_is_dma_coherent(dev)) {
                arch_sync_dma_for_cpu(dev, paddr, size, dir);
                arch_sync_dma_for_cpu_all(dev);
        }

        if (unlikely(is_swiotlb_buffer(paddr)))
                swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_cpu);

void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);

                if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
                        swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length, dir,
                                        SYNC_FOR_CPU);
        }

        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_cpu_all(dev);
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);

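/*
 * Unmap a single page: do the for-CPU synchronisation unless the caller
 * asked to skip it, and release the SWIOTLB bounce buffer if one was used
 * for this mapping.
 */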
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        phys_addr_t phys = dma_to_phys(dev, addr);

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);

        if (unlikely(is_swiotlb_buffer(phys)))
                swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_page);

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
                                attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_sg);
#endif

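/*
 * A mapping may stay direct when bouncing is not forced and the DMA
 * address is within the device's addressable range.
 */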
static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
                size_t size)
{
        return swiotlb_force != SWIOTLB_FORCE &&
                (!dev || dma_capable(dev, dma_addr, size));
}

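/*
 * Map a single page, falling back to a SWIOTLB bounce buffer when the
 * direct DMA address is not usable by the device.  For non-coherent
 * devices the for-device cache maintenance is done here unless
 * DMA_ATTR_SKIP_CPU_SYNC is set.
 */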
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        dma_addr_t dma_addr = phys_to_dma(dev, phys);

        if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
            !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
                report_addr(dev, dma_addr, size);
                return DMA_MAPPING_ERROR;
        }

        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                arch_sync_dma_for_device(dev, phys, size, dir);
        return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_page);

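/*
 * Map a scatterlist entry by entry; on failure unwind the entries mapped
 * so far and return 0, as the DMA mapping API expects.
 */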
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
                                sg->offset, sg->length, dir, attrs);
                if (sg->dma_address == DMA_MAPPING_ERROR)
                        goto out_unmap;
                sg_dma_len(sg) = sg->length;
        }

        return nents;

out_unmap:
        dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
        return 0;
}
EXPORT_SYMBOL(dma_direct_map_sg);

/*
 * Because 32-bit DMA masks are so common we expect every architecture to be
 * able to satisfy them - either by not supporting more physical memory, or by
 * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
 * use an IOMMU instead of the direct mapping.
 */
int dma_direct_supported(struct device *dev, u64 mask)
{
        u64 min_mask;

        if (IS_ENABLED(CONFIG_ZONE_DMA))
                min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
        else
                min_mask = DMA_BIT_MASK(32);

        min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);

        /*
         * This check needs to be against the actual bit mask value, so
         * use __phys_to_dma() here so that the SME encryption mask isn't
         * part of the check.
         */
        return mask >= __phys_to_dma(dev, min_mask);
}