// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without providing cache
 * coherence.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/scatterlist.h>

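/*
 * Transfer ownership of a single mapped range to the device: translate the
 * device address back to a physical address and have the architecture
 * perform the required cache maintenance on it.
 */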
static void dma_noncoherent_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
}

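/*
 * Scatterlist variant of the above: sync each list entry for the device
 * individually, as the entries need not be physically contiguous.
 */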
static void dma_noncoherent_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

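/*
 * Map a page via dma-direct, then sync the freshly mapped range for the
 * device so it observes up-to-date data.  Callers passing
 * DMA_ATTR_SKIP_CPU_SYNC take over responsibility for cache maintenance
 * themselves.
 */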
static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t addr;

	addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
				size, dir);
	return addr;
}

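/*
 * Map a scatterlist via dma-direct and, unless the caller opted out with
 * DMA_ATTR_SKIP_CPU_SYNC, sync every successfully mapped entry for the
 * device.
 */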
static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
	if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
	return nents;
}

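/*
 * Architectures that also need cache maintenance when a buffer is handed
 * back to the CPU (typically invalidating stale cache lines after the
 * device wrote to memory) provide arch_sync_dma_for_cpu() and enable
 * CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU, which adds the callbacks below.
 */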
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
}

static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

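/*
 * dma-direct mappings have no state to tear down, so unmapping reduces to
 * handing buffer ownership back to the CPU via the for-CPU syncs.
 */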
static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
}

static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
}
#endif

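/*
 * The ops table combines the generic dma-direct helpers with the hooks an
 * architecture supplies (arch_dma_alloc/free/mmap, arch_dma_cache_sync and
 * the arch_sync_dma_* routines) via <linux/dma-noncoherent.h>.
 */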
const struct dma_map_ops dma_noncoherent_ops = {
	.alloc			= arch_dma_alloc,
	.free			= arch_dma_free,
	.mmap			= arch_dma_mmap,
	.sync_single_for_device	= dma_noncoherent_sync_single_for_device,
	.sync_sg_for_device	= dma_noncoherent_sync_sg_for_device,
	.map_page		= dma_noncoherent_map_page,
	.map_sg			= dma_noncoherent_map_sg,
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
	.sync_single_for_cpu	= dma_noncoherent_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_noncoherent_sync_sg_for_cpu,
	.unmap_page		= dma_noncoherent_unmap_page,
	.unmap_sg		= dma_noncoherent_unmap_sg,
#endif
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
	.cache_sync		= arch_dma_cache_sync,
};
EXPORT_SYMBOL(dma_noncoherent_ops);
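
/*
 * For illustration only, a minimal sketch of the for-device hook an
 * architecture has to supply; the prototype follows the calls made above,
 * the body is hypothetical:
 *
 *	void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 *			size_t size, enum dma_data_direction dir)
 *	{
 *		... write back (and, for device-to-memory transfers,
 *		invalidate) the CPU cache lines covering
 *		[paddr, paddr + size) ...
 *	}
 */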