// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
        return sev_active();
}

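/*
 * Check that a DMA address is usable by the device: it must fit within both
 * the device DMA mask and the bus DMA mask (via dma_capable()).  Overflows
 * are reported unless the device only claims a narrow (< 32-bit) mask and no
 * bus mask is set, in which case the failure is expected and left to the
 * caller to handle.
 */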
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
                const char *caller)
{
        if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
                if (!dev->dma_mask) {
                        dev_err(dev,
                                "%s: call on device without dma_mask\n",
                                caller);
                        return false;
                }

                if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
                        dev_err(dev,
                                "%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
                                caller, &dma_addr, size,
                                *dev->dma_mask, dev->bus_dma_mask);
                }
                return false;
        }
        return true;
}

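/*
 * Translate a CPU physical address to a DMA address, using the unencrypted
 * alias when memory encryption (e.g. AMD SEV) forces DMA to unencrypted
 * addresses.
 */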
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
                phys_addr_t phys)
{
        if (force_dma_unencrypted())
                return __phys_to_dma(dev, phys);
        return phys_to_dma(dev, phys);
}

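/*
 * Return the DMA mask needed to address all physical memory in the system,
 * rounded up to the next power-of-two mask and capped by the bus DMA mask
 * if one is set.
 */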
u64 dma_direct_get_required_mask(struct device *dev)
{
        u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);

        if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
                max_dma = dev->bus_dma_mask;

        return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

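/*
 * Pick the GFP zone flags for an allocation that must be reachable under
 * @dma_mask, and return the corresponding physical address limit through
 * @phys_mask.
 */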
static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
                u64 *phys_mask)
{
        if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
                dma_mask = dev->bus_dma_mask;

        if (force_dma_unencrypted())
                *phys_mask = __dma_to_phys(dev, dma_mask);
        else
                *phys_mask = dma_to_phys(dev, dma_mask);

        /*
         * Optimistically try the zone that the physical address mask falls
         * into first.  If that returns memory that isn't actually addressable
         * we will fall back to the next lower zone and try again.
         *
         * Note that GFP_DMA32 and GFP_DMA are no-ops without the
         * corresponding zones.
         */
        if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
                return GFP_DMA;
        if (*phys_mask <= DMA_BIT_MASK(32))
                return GFP_DMA32;
        return 0;
}

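/*
 * Check whether memory at @phys is addressable within the device's coherent
 * DMA mask (and the bus DMA mask, if one is set).
 */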
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
        return phys_to_dma_direct(dev, phys) + size - 1 <=
                        min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}

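/*
 * Allocate zeroed pages for a coherent mapping.  CMA is tried first when the
 * context allows sleeping; if the resulting memory is not addressable by the
 * device, the allocation is retried from successively more restrictive zones.
 */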
void *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        int page_order = get_order(size);
        struct page *page = NULL;
        u64 phys_mask;
        void *ret;

        if (attrs & DMA_ATTR_NO_WARN)
                gfp |= __GFP_NOWARN;

        /* we always manually zero the memory once we are done: */
        gfp &= ~__GFP_ZERO;
        gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                        &phys_mask);
again:
        /* CMA can only be used in a context that permits sleeping */
        if (gfpflags_allow_blocking(gfp)) {
                page = dma_alloc_from_contiguous(dev, count, page_order,
                                gfp & __GFP_NOWARN);
                if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                        dma_release_from_contiguous(dev, page, count);
                        page = NULL;
                }
        }
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                __free_pages(page, page_order);
                page = NULL;

                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    phys_mask < DMA_BIT_MASK(64) &&
                    !(gfp & (GFP_DMA32 | GFP_DMA))) {
                        gfp |= GFP_DMA32;
                        goto again;
                }

                if (IS_ENABLED(CONFIG_ZONE_DMA) &&
                    phys_mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }
        }

        if (!page)
                return NULL;
        ret = page_address(page);
        if (force_dma_unencrypted()) {
                set_memory_decrypted((unsigned long)ret, 1 << page_order);
                *dma_handle = __phys_to_dma(dev, page_to_phys(page));
        } else {
                *dma_handle = phys_to_dma(dev, page_to_phys(page));
        }
        memset(ret, 0, size);
        return ret;
}

/*
 * NOTE: this function must never look at the dma_addr argument, because we
 * want to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned int page_order = get_order(size);

        if (force_dma_unencrypted())
                set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
        if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
                free_pages((unsigned long)cpu_addr, page_order);
}

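/*
 * Entry points for coherent allocations: devices that are not cache-coherent
 * are handed off to the architecture's arch_dma_alloc()/arch_dma_free();
 * everything else uses the plain page-based helpers above.
 */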
void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        if (!dev_is_dma_coherent(dev))
                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
        return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
        if (!dev_is_dma_coherent(dev))
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
        else
                dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}

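/*
 * Cache maintenance before the device accesses a buffer: a no-op for
 * cache-coherent devices, otherwise deferred to the architecture's
 * arch_sync_dma_for_device() hook.
 */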
static void dma_direct_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
        if (dev_is_dma_coherent(dev))
                return;
        arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
}

static void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nents, i)
                arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

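/*
 * Cache maintenance before the CPU reads a buffer the device may have
 * written, plus the unmap helpers that need it.  Only built when the
 * architecture provides the corresponding sync hooks.
 */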
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static void dma_direct_sync_single_for_cpu(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
        if (dev_is_dma_coherent(dev))
                return;
        arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
        arch_sync_dma_for_cpu_all(dev);
}

static void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nents, i)
                arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
        arch_sync_dma_for_cpu_all(dev);
}

static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
}
#endif

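/*
 * Map a single page for streaming DMA: the DMA address is simply the
 * physical address translated through phys_to_dma(), validated against the
 * device and bus masks.
 */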
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        dma_addr_t dma_addr = phys_to_dma(dev, phys);

        if (!check_addr(dev, dma_addr, size, __func__))
                return DIRECT_MAPPING_ERROR;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_direct_sync_single_for_device(dev, dma_addr, size, dir);
        return dma_addr;
}

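/*
 * Map a scatterlist for streaming DMA.  Returns the number of mapped
 * entries, or 0 if any entry is not addressable by the device.
 */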
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                BUG_ON(!sg_page(sg));

                sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
                if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
                        return 0;
                sg_dma_len(sg) = sg->length;
        }

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
        return nents;
}

/*
 * Because 32-bit DMA masks are so common we expect every architecture to be
 * able to satisfy them: either by not supporting more physical memory, or by
 * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
 * use an IOMMU instead of the direct mapping.
 */
int dma_direct_supported(struct device *dev, u64 mask)
{
        u64 min_mask;

        if (IS_ENABLED(CONFIG_ZONE_DMA))
                min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
        else
                min_mask = DMA_BIT_MASK(32);

        min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);

        return mask >= phys_to_dma(dev, min_mask);
}

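/*
 * The direct mapping uses a single reserved error value rather than an error
 * range, so detecting a mapping failure is a simple equality check.
 */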
int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == DIRECT_MAPPING_ERROR;
}

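/*
 * The dma_map_ops instance used when devices access memory directly, with no
 * IOMMU translation.  The sync and unmap callbacks are only wired up when
 * the architecture implements the corresponding cache maintenance hooks.
 */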
const struct dma_map_ops dma_direct_ops = {
        .alloc = dma_direct_alloc,
        .free = dma_direct_free,
        .map_page = dma_direct_map_page,
        .map_sg = dma_direct_map_sg,
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
        .sync_single_for_device = dma_direct_sync_single_for_device,
        .sync_sg_for_device = dma_direct_sync_sg_for_device,
#endif
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
        .sync_single_for_cpu = dma_direct_sync_single_for_cpu,
        .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
        .unmap_page = dma_direct_unmap_page,
        .unmap_sg = dma_direct_unmap_sg,
#endif
        .get_required_mask = dma_direct_get_required_mask,
        .dma_supported = dma_direct_supported,
        .mapping_error = dma_direct_mapping_error,
        .cache_sync = arch_dma_cache_sync,
};
EXPORT_SYMBOL(dma_direct_ops);