// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

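/*
 * Translate a CPU physical address to the address the device has to use for
 * DMA.  For devices that must use unencrypted memory the variant without the
 * encryption bit is returned.
 */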
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

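/*
 * Report the smallest DMA mask that still covers the highest populated
 * physical page: take the DMA address of (max_pfn - 1) and round it up to
 * the next power of two minus one.
 */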
u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fallback to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

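/*
 * Check that the whole [phys, phys + size) range translates to a DMA address
 * within both the coherent DMA mask and the bus DMA limit of the device.
 */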
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

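/*
 * For devices that have to use unencrypted DMA memory (e.g. with memory
 * encryption enabled), flip the encryption attribute of the kernel mapping
 * of the buffer.  dma_set_encrypted() warns when re-encrypting fails, as the
 * memory then has to be leaked.
 */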
static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}

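/*
 * Return pages either to the restricted swiotlb pool they were allocated
 * from, or through dma_free_contiguous() otherwise.
 */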
static void __dma_direct_free_pages(struct device *dev, struct page *page,
		size_t size)
{
	if (swiotlb_free(dev, page, size))
		return;
	dma_free_contiguous(dev, page, size);
}

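/*
 * Allocate from the per-device restricted swiotlb pool and make sure the
 * result is actually addressable by the device.
 */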
static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}

	return page;
}

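/*
 * Page allocation workhorse: try the restricted pool, then CMA / contiguous
 * memory, then the page allocator, retrying with a more restrictive GFP zone
 * (GFP_DMA32, then GFP_DMA) whenever the returned pages are not addressable
 * by the device.
 */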
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_swiotlb(dev, size);

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

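/*
 * Allocate from the preallocated atomic coherent pools for callers that
 * cannot block while remapping or changing the encryption attributes.
 */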
static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_mask;
	void *ret;

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
		return NULL;

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_mask);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}

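/*
 * DMA_ATTR_NO_KERNEL_MAPPING path: no kernel virtual address is set up for
 * the buffer, the struct page pointer is handed back as an opaque cookie.
 */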
static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	if (!PageHighMem(page))
		arch_dma_prep_coherent(page, size);

	/* return the page pointer as the opaque cookie */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}

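/*
 * Main entry point for coherent allocations.  Depending on the device and
 * kernel configuration this either takes one of the early-return paths
 * (no kernel mapping, arch fallback, global or atomic pools) or allocates
 * pages and then remaps them, marks them uncached and/or decrypts them
 * before zeroing the buffer.
 */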
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap = false, set_uncached = false;
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

	if (!dev_is_dma_coherent(dev)) {
		/*
		 * Fallback to the arch handler if it exists.  This should
		 * eventually go away.
		 */
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
		    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
		    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
		    !is_swiotlb_for_alloc(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp,
					      attrs);

		/*
		 * If there is a global pool, always allocate from it for
		 * non-coherent devices.
		 */
		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
			return dma_alloc_from_global_coherent(dev, size,
					dma_handle);

		/*
		 * Otherwise remap if the architecture is asking for it.  But
		 * given that remapping memory is a blocking operation we'll
		 * instead have to dip into the atomic pools.
		 */
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
		if (remap) {
			if (dma_direct_use_pool(dev, gfp))
				return dma_direct_alloc_from_pool(dev, size,
						dma_handle, gfp);
		} else {
			if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
				return NULL;
			set_uncached = true;
		}
	}

	/*
	 * Decrypting memory may block, so allocate the memory from the atomic
	 * pools if we can't block.
	 */
	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
	if (!page)
		return NULL;
	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup,
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here, so
		 * log an error and fail.
		 */
		if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
			dev_info(dev, "Rejecting highmem page from CMA.\n");
			goto out_free_pages;
		}
		remap = true;
		set_uncached = false;
	}

	if (remap) {
		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size,
				dma_pgprot(dev, PAGE_KERNEL, attrs),
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
	} else {
		ret = page_address(page);
		if (dma_set_decrypted(dev, ret, size))
			goto out_free_pages;
	}

	memset(ret, 0, size);

	if (set_uncached) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}

	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (dma_set_encrypted(dev, page_address(page), size))
		return NULL;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}

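/*
 * Mirror of dma_direct_alloc(): each branch undoes the corresponding
 * allocation path before the pages are finally returned.
 */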
void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev) &&
	    !is_swiotlb_for_alloc(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev)) {
		if (!dma_release_from_global_coherent(page_order, cpu_addr))
			WARN_ON_ONCE(1);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		vunmap(cpu_addr);
	} else {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
			arch_dma_clear_uncached(cpu_addr, size);
		if (dma_set_encrypted(dev, cpu_addr, 1 << page_order))
			return;
	}

	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

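/*
 * Allocate pages for the struct page based DMA API (dma_alloc_pages()):
 * returns zeroed, kernel-mapped (non-highmem) pages that the device can
 * address directly.
 */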
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp);
	if (!page)
		return NULL;
	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		goto out_free_pages;
	}

	ret = page_address(page);
	if (dma_set_decrypted(dev, ret, size))
		goto out_free_pages;
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	unsigned int page_order = get_order(size);
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (dma_set_encrypted(dev, vaddr, 1 << page_order))
		return;
	__dma_direct_free_pages(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_device(dev, paddr, sg->length,
						       dir);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
						    dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
#endif

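/*
 * Map each scatterlist segment with dma_direct_map_page(), unwinding all
 * already mapped segments if one of them fails.
 */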
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return -EIO;
}

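/*
 * Map a physical resource (e.g. MMIO) for DMA: the address is used as-is
 * and only validated against the addressing capabilities of the device.
 */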
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

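/*
 * Build a single-entry sg_table describing a coherent allocation, used to
 * implement dma_get_sgtable() for the direct mapping.
 */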
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active(dev) &&
	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

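/*
 * A sync is only needed if the device is not cache coherent or the DMA
 * address points into a swiotlb bounce buffer.
 */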
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
		is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the alloced memory.
 * @cpu_start:	beginning of memory region covered by this offset.
 * @dma_start:	beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			 dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].offset = offset;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}