/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate the memory in a
 * way that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)
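
/*
 * Illustrative sketch (not part of this header): the attributes above are
 * passed as a bitmask in the 'attrs' argument of the *_attrs variants of the
 * allocation and mapping helpers declared below, e.g.:
 *
 *	unsigned long attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN;
 *	void *buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, attrs);
 *	if (buf)
 *		dma_free_attrs(dev, size, buf, dma_handle, attrs);
 *
 * The combination shown is an example only; which attributes make sense
 * depends on the device and platform.
 */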

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
};
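
/*
 * Illustrative sketch (not part of this header; the my_bus_* callbacks are
 * hypothetical): a dma_map_ops provider fills in only the callbacks it
 * supports and leaves the rest NULL, e.g.:
 *
 *	static const struct dma_map_ops my_bus_dma_ops = {
 *		.alloc		= my_bus_dma_alloc,
 *		.free		= my_bus_dma_free,
 *		.map_page	= my_bus_dma_map_page,
 *		.unmap_page	= my_bus_dma_unmap_page,
 *		.map_sg		= my_bus_dma_map_sg,
 *		.unmap_sg	= my_bus_dma_unmap_sg,
 *		.dma_supported	= my_bus_dma_supported,
 *	};
 *
 * The ops are then attached to a device with set_dma_ops() (declared below
 * under CONFIG_HAS_DMA) before any mapping calls are made for that device.
 */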

#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

extern const struct dma_map_ops dma_virt_ops;
extern const struct dma_map_ops dma_dummy_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
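
/*
 * Worked example (illustrative): DMA_BIT_MASK(32) evaluates to 0xffffffffULL
 * and DMA_BIT_MASK(24) to 0x00ffffffULL; the n == 64 special case avoids the
 * undefined behaviour of shifting a 64-bit value by 64 bits.
 */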

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These functions are for the DMA allocator only.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline bool dma_is_direct(const struct dma_map_ops *ops)
{
	return likely(!ops);
}

/*
 * All the dma_direct_* declarations are here just for the indirect call bypass,
 * and must not be used directly by drivers!
 */
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
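
/*
 * Illustrative sketch (not part of this header; 'sgt' is assumed to be an
 * already populated struct sg_table): the returned count, not the original
 * nents, is what the device gets to see:
 *
 *	int count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	...program the device with 'count' mapped segments...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original nents, while the device sees
 * only 'count' entries because the mapping may have merged segments.
 */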

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_is_direct(ops))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops && ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (dma_addr == DMA_MAPPING_ERROR)
		return -ENOMEM;
	return 0;
}

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_HAS_DMA */

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
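
/*
 * Illustrative sketch (not part of this header): a driver that reuses a
 * streaming mapping across transfers hands ownership back and forth with the
 * sync calls instead of remapping, e.g. for a DMA_FROM_DEVICE buffer:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	...the CPU may now read the data the device wrote...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	...the device may DMA into the buffer again...
 *
 * The *_range variants above do the same for a sub-range starting at
 * 'offset' within the mapping.
 */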

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
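
/*
 * Illustrative sketch (not part of this header): typical streaming use of the
 * attr-less wrappers, always checking the returned handle with
 * dma_mapping_error() before programming the device:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...tell the device to DMA from 'handle'...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * 'buf' must be kmalloc()-style kernel memory, not a vmalloc() or stack
 * address, since dma_map_single() relies on virt_to_page().
 */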

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
bool dma_in_atomic_pool(void *start, size_t size);
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(void *start, size_t size);

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
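
/*
 * Illustrative sketch (not part of this header; RING_BYTES is a placeholder
 * size): coherent memory suits long-lived, concurrently accessed structures
 * such as descriptor rings, and needs no sync calls:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...hand 'ring_dma' to the device, access 'ring' from the CPU...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */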

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same as, or a smaller value than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
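
/*
 * Illustrative sketch (not part of this header): a typical probe() path asks
 * for the widest mask the device supports and falls back to 32 bits:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
 *		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *			return -ENODEV;
 *
 * The call returns 0 on success and a negative errno if the platform cannot
 * satisfy the requested mask.
 */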

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
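
/*
 * Illustrative sketch (not part of this header): these macros let a driver
 * carry unmap information at zero cost when CONFIG_NEED_DMA_MAP_STATE is not
 * set. 'struct my_tx_slot' and its fields are hypothetical:
 *
 *	struct my_tx_slot {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(slot, addr, handle);
 *	dma_unmap_len_set(slot, len, size);
 *	...later, on completion...
 *	dma_unmap_single(dev, dma_unmap_addr(slot, addr),
 *			 dma_unmap_len(slot, len), DMA_TO_DEVICE);
 */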

#endif