/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_NONCOHERENT_H
#define _LINUX_DMA_NONCOHERENT_H 1

#include <linux/dma-mapping.h>
#include <asm/pgtable.h>

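/*
 * dev_is_dma_coherent() reports whether DMA to/from @dev is cache coherent
 * with the CPU, i.e. whether the kernel can skip explicit cache maintenance
 * around DMA transfers to that device.
 */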
#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
#include <asm/dma-coherence.h>
#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return dev->dma_coherent;
}
#else
static inline bool dev_is_dma_coherent(struct device *dev)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */

/*
 * Check if an allocation needs to be marked uncached to be coherent.
 * No uncached mapping is needed when the device itself is coherent, when
 * no kernel mapping is created at all (DMA_ATTR_NO_KERNEL_MAPPING), or
 * when the caller maintains coherency explicitly via dma_cache_sync()
 * (DMA_ATTR_NON_CONSISTENT with CONFIG_DMA_NONCOHERENT_CACHE_SYNC).
 */
static __always_inline bool dma_alloc_need_uncached(struct device *dev,
		unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return false;
	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return false;
	if (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		return false;
	return true;
}

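/*
 * Architecture override for the dma-direct allocator, used when a
 * non-coherent device needs an allocation path beyond what the generic
 * remapping/uncached helpers provide.
 */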
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently.  We default to pgprot_noncached which is usually used
 * for ioremap as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot)	pgprot_noncached(prot)
#endif

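/*
 * dma_pgprot() derives the page protection used when DMA memory is mapped
 * into userspace or remapped into the kernel, based on the device's
 * coherency and the mapping attributes.
 */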
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
		unsigned long attrs)
{
	return prot;	/* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */

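/*
 * Backend for dma_cache_sync(), used only for memory allocated with
 * DMA_ATTR_NON_CONSISTENT, where the driver performs the required cache
 * maintenance explicitly.
 */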
#ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction);
#else
static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
		size_t size, enum dma_data_direction direction)
{
}
#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */

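/*
 * Called before the device is given ownership of a buffer, typically to
 * write back (and, depending on the direction, invalidate) the CPU caches
 * covering the physical range.
 */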
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */

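/*
 * Called before the CPU touches a buffer the device may have written to,
 * typically to invalidate stale cache lines covering the range.
 */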
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */

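/*
 * Illustrative sketch only, not part of this API: on a typical non-coherent
 * architecture the two hooks above boil down to cache maintenance roughly
 * along these lines, where cache_wb_range(), cache_inv_range() and
 * cache_wb_inv_range() stand in for the architecture's own (hypothetical)
 * primitives:
 *
 *	void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 *			enum dma_data_direction dir)
 *	{
 *		switch (dir) {
 *		case DMA_TO_DEVICE:
 *			cache_wb_range(paddr, size);
 *			break;
 *		case DMA_FROM_DEVICE:
 *			cache_inv_range(paddr, size);
 *			break;
 *		default:
 *			cache_wb_inv_range(paddr, size);
 *			break;
 *		}
 *	}
 *
 *	void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 *			enum dma_data_direction dir)
 *	{
 *		if (dir != DMA_TO_DEVICE)
 *			cache_inv_range(paddr, size);
 *	}
 */

/*
 * Optional hook called in addition to arch_sync_dma_for_cpu() when buffer
 * ownership is handed back to the CPU, for architectures that also need a
 * global action such as a full cache or write-buffer flush.
 */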
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */

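/*
 * Called on freshly allocated memory before it is handed out as a coherent
 * allocation, typically to flush it out of the CPU caches before an
 * uncached mapping or alias is created.
 */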
#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

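/*
 * arch_dma_set_uncached() returns an uncached alias for @addr (for example
 * an address in an uncached segment or a new remapped mapping), or an
 * ERR_PTR() on failure; arch_dma_clear_uncached() tears it down again.
 */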
void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#endif /* _LINUX_DMA_NONCOHERENT_H */