// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>

#define DIRECT_MAPPING_ERROR		0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
	return sev_active();
}

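/*
 * Validate that the DMA address range is reachable through the device's
 * DMA mask, and report an error if it is not.
 */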
static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (!dev->dma_mask) {
			dev_err(dev,
				"%s: call on device without dma_mask\n",
				caller);
			return false;
		}

		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx\n",
				caller, &dma_addr, size, *dev->dma_mask);
		}
		return false;
	}
	return true;
}

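/*
 * Check whether a physical range fits within the device's coherent DMA mask,
 * using the unencrypted address when SEV forces unencrypted DMA.
 */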
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t addr = force_dma_unencrypted() ?
		__phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
	return addr + size - 1 <= dev->coherent_dma_mask;
}

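/*
 * Allocate zeroed coherent memory: try CMA when sleeping is allowed, then the
 * page allocator, retrying in more restrictive zones until the allocation
 * fits the coherent DMA mask.
 */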
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;
	void *ret;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;

	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		gfp |= GFP_DMA;
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order,
						 gfp & __GFP_NOWARN);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

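	/*
	 * If the pages we got are not addressable under the coherent DMA
	 * mask, release them and retry from a more restrictive zone.
	 */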
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
		    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
		    !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	if (!page)
		return NULL;
	ret = page_address(page);
	if (force_dma_unencrypted()) {
		set_memory_decrypted((unsigned long)ret, 1 << page_order);
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);
	return ret;
}

/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int page_order = get_order(size);

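	/* re-encrypt memory that was made unencrypted for SEV DMA at alloc time */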
	if (force_dma_unencrypted())
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, page_order);
}

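/*
 * Map a single page for streaming DMA: the DMA address is the page's physical
 * address translated through the direct mapping, after checking that the
 * device can actually reach it.
 */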
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;
	return dma_addr;
}

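/*
 * Map a scatterlist by direct translation of each entry; returns 0 if any
 * entry is not addressable by the device.
 */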
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}

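/*
 * Report whether the direct mapping can satisfy the given DMA mask for this
 * device.
 */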
int dma_direct_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_ZONE_DMA
	if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return 0;
#else
	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask < DMA_BIT_MASK(32))
		return 0;
#endif
	/*
	 * Upstream PCI/PCIe bridges or SoC interconnects may not carry
	 * as many DMA address bits as the device itself supports.
	 */
	if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
		return 0;
	return 1;
}

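/* The direct mapping uses DMA address 0 as its error marker. */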
int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

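/*
 * dma_map_ops instance wiring up the direct-mapping implementations above.
 */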
const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
};
EXPORT_SYMBOL(dma_direct_ops);