blob: d19f9fef4ab93a2b4521502af838214b707e918a [file] [log] [blame]
Thomas Gleixnerec8f24b2019-05-19 13:07:45 +01001# SPDX-License-Identifier: GPL-2.0-only
Christoph Hellwigcf65a0f2018-06-12 19:01:45 +02002
Christoph Hellwig846f9e12020-07-14 14:18:54 +02003config NO_DMA
4 bool
5
Christoph Hellwigcf65a0f2018-06-12 19:01:45 +02006config HAS_DMA
7 bool
8 depends on !NO_DMA
9 default y
10
Christoph Hellwig2f9237d2020-07-08 09:30:00 +020011config DMA_OPS
Christoph Hellwigef1a85b2020-08-29 10:40:28 +020012 depends on HAS_DMA
Christoph Hellwig2f9237d2020-07-08 09:30:00 +020013 bool
14
Christoph Hellwigd35834c2020-03-23 18:19:30 +010015#
16# IOMMU drivers that can bypass the IOMMU code and optionally use the direct
17# mapping fast path should select this option and set the dma_ops_bypass
18# flag in struct device where applicable
19#
20config DMA_OPS_BYPASS
21 bool
22
Alexey Kardashevskiy8d8d53c2020-10-29 12:52:40 +110023# Lets platform IOMMU driver choose between bypass and IOMMU
24config ARCH_HAS_DMA_MAP_DIRECT
25 bool
26
Christoph Hellwigcf65a0f2018-06-12 19:01:45 +020027config NEED_SG_DMA_LENGTH
28 bool
29
30config NEED_DMA_MAP_STATE
31 bool
32
33config ARCH_DMA_ADDR_T_64BIT
34 def_bool 64BIT || PHYS_ADDR_T_64BIT
35
Christoph Hellwig11ddce12019-02-13 08:01:22 +010036config ARCH_HAS_DMA_SET_MASK
37 bool
38
Christoph Hellwig419e2f12019-08-26 09:03:44 +020039#
40# Select this option if the architecture needs special handling for
41# DMA_ATTR_WRITE_COMBINE. Normally the "uncached" mapping should be what
42# people think of when saying write combine, so very few platforms should
43# need to enable this.
44#
45config ARCH_HAS_DMA_WRITE_COMBINE
46 bool
47
Christoph Hellwigabdaf112020-08-17 16:41:50 +020048#
49# Select if the architectures provides the arch_dma_mark_clean hook
50#
51config ARCH_HAS_DMA_MARK_CLEAN
52 bool
53
Christoph Hellwigff4c25f2019-02-03 20:12:02 +010054config DMA_DECLARE_COHERENT
Christoph Hellwigcf65a0f2018-06-12 19:01:45 +020055 bool
56
Christoph Hellwig347cb6a2019-01-07 13:36:20 -050057config ARCH_HAS_SETUP_DMA_OPS
58 bool
59
Christoph Hellwigdc2acde2018-12-21 22:14:44 +010060config ARCH_HAS_TEARDOWN_DMA_OPS
Christoph Hellwigcf65a0f2018-06-12 19:01:45 +020061 bool
62
63config ARCH_HAS_SYNC_DMA_FOR_DEVICE
64 bool
65
66config ARCH_HAS_SYNC_DMA_FOR_CPU
67 bool
68 select NEED_DMA_MAP_STATE
69
Christoph Hellwig684f7e92018-09-11 08:54:57 +020070config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
71 bool
72
Christoph Hellwig13bf5ce2019-03-25 15:44:06 +010073config ARCH_HAS_DMA_PREP_COHERENT
74 bool
75
Tom Lendacky9087c372019-07-10 19:01:19 +000076config ARCH_HAS_FORCE_DMA_UNENCRYPTED
77 bool
78
Christoph Hellwigcf65a0f2018-06-12 19:01:45 +020079config SWIOTLB
80 bool
Christoph Hellwigcf65a0f2018-06-12 19:01:45 +020081 select NEED_DMA_MAP_STATE
Christoph Hellwigf0edfea2018-08-24 10:31:08 +020082
Claire Chang0b84e4f2021-06-19 11:40:41 +080083config DMA_RESTRICTED_POOL
84 bool "DMA Restricted Pool"
Claire Changf3c4b132021-08-27 11:48:02 +080085 depends on OF && OF_RESERVED_MEM && SWIOTLB
Claire Chang0b84e4f2021-06-19 11:40:41 +080086 help
87 This enables support for restricted DMA pools which provide a level of
88 DMA memory protection on systems with limited hardware protection
89 capabilities, such as those lacking an IOMMU.
90
91 For more information see
92 <Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt>
93 and <kernel/dma/swiotlb.c>.
94 If unsure, say "n".
95
Christoph Hellwig34dc0ea2019-10-29 11:01:37 +010096#
97# Should be selected if we can mmap non-coherent mappings to userspace.
98# The only thing that is really required is a way to set an uncached bit
99# in the pagetables
100#
101config DMA_NONCOHERENT_MMAP
Christoph Hellwig1fbf57d2020-06-10 10:29:49 +0200102 default y if !MMU
Christoph Hellwig34dc0ea2019-10-29 11:01:37 +0100103 bool
104
David Rientjesdbed4522020-06-11 00:25:57 -0700105config DMA_COHERENT_POOL
Christoph Hellwigd07ae4c2020-06-18 17:23:31 +0200106 select GENERIC_ALLOCATOR
David Rientjesdbed4522020-06-11 00:25:57 -0700107 bool
108
Christoph Hellwigf0edfea2018-08-24 10:31:08 +0200109config DMA_REMAP
David Rientjesdbed4522020-06-11 00:25:57 -0700110 bool
Christoph Hellwigf0edfea2018-08-24 10:31:08 +0200111 depends on MMU
Christoph Hellwig34dc0ea2019-10-29 11:01:37 +0100112 select DMA_NONCOHERENT_MMAP
Andy Shevchenko70ca7ba2019-02-11 18:12:30 +0200113
David Rientjese860c292020-04-14 17:04:52 -0700114config DMA_DIRECT_REMAP
115 bool
David Rientjesdbed4522020-06-11 00:25:57 -0700116 select DMA_REMAP
David Rientjese860c292020-04-14 17:04:52 -0700117 select DMA_COHERENT_POOL
118
Christoph Hellwigddb26d82019-02-13 19:19:08 +0100119config DMA_CMA
120 bool "DMA Contiguous Memory Allocator"
121 depends on HAVE_DMA_CONTIGUOUS && CMA
122 help
123 This enables the Contiguous Memory Allocator which allows drivers
124 to allocate big physically-contiguous blocks of memory for use with
125 hardware components that do not support I/O mapping or scatter-gather.
126
127 You can disable CMA by specifying "cma=0" on the kernel's command
128 line.
129
Christoph Hellwig0b1abd12020-09-11 10:56:52 +0200130 For more information see <kernel/dma/contiguous.c>.
Christoph Hellwigddb26d82019-02-13 19:19:08 +0100131 If unsure, say "n".
132
133if DMA_CMA
Barry Songb7176c22020-08-24 11:03:07 +1200134
135config DMA_PERNUMA_CMA
136 bool "Enable separate DMA Contiguous Memory Area for each NUMA Node"
137 default NUMA && ARM64
138 help
139 Enable this option to get pernuma CMA areas so that devices like
140 ARM64 SMMU can get local memory via DMA coherent APIs.
141
142 You can set the size of pernuma CMA by specifying "cma_pernuma=size"
143 on the kernel's command line.
144
Christoph Hellwigddb26d82019-02-13 19:19:08 +0100145comment "Default contiguous memory area size:"
146
147config CMA_SIZE_MBYTES
148 int "Size in Mega Bytes"
149 depends on !CMA_SIZE_SEL_PERCENTAGE
150 default 0 if X86
151 default 16
152 help
153 Defines the size (in MiB) of the default memory area for Contiguous
154 Memory Allocator. If the size of 0 is selected, CMA is disabled by
155 default, but it can be enabled by passing cma=size[MG] to the kernel.
156
157
158config CMA_SIZE_PERCENTAGE
159 int "Percentage of total memory"
160 depends on !CMA_SIZE_SEL_MBYTES
161 default 0 if X86
162 default 10
163 help
164 Defines the size of the default memory area for Contiguous Memory
165 Allocator as a percentage of the total memory in the system.
166 If 0 percent is selected, CMA is disabled by default, but it can be
167 enabled by passing cma=size[MG] to the kernel.
168
169choice
170 prompt "Selected region size"
171 default CMA_SIZE_SEL_MBYTES
172
173config CMA_SIZE_SEL_MBYTES
174 bool "Use mega bytes value only"
175
176config CMA_SIZE_SEL_PERCENTAGE
177 bool "Use percentage value only"
178
179config CMA_SIZE_SEL_MIN
180 bool "Use lower value (minimum)"
181
182config CMA_SIZE_SEL_MAX
183 bool "Use higher value (maximum)"
184
185endchoice
186
187config CMA_ALIGNMENT
188 int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
Paul Cercueil0de32792020-09-30 12:28:21 +0200189 range 2 12
Christoph Hellwigddb26d82019-02-13 19:19:08 +0100190 default 8
191 help
192 DMA mapping framework by default aligns all buffers to the smallest
193 PAGE_SIZE order which is greater than or equal to the requested buffer
194 size. This works well for buffers up to a few hundreds kilobytes, but
195 for larger buffers it is just a memory waste. With this parameter you can
196 specify the maximum PAGE_SIZE order for contiguous buffers. Larger
197 buffers will be aligned only to this specified order. The order is
198 expressed as a power of two multiplied by the PAGE_SIZE.
199
200 For example, if your system defaults to 4KiB pages, the order value
201 of 8 means that the buffers will be aligned up to 1MiB only.
202
203 If unsure, leave the default value "8".
204
205endif
206
Andy Shevchenko70ca7ba2019-02-11 18:12:30 +0200207config DMA_API_DEBUG
208 bool "Enable debugging of DMA-API usage"
209 select NEED_DMA_MAP_STATE
210 help
211 Enable this option to debug the use of the DMA API by device drivers.
212 With this option you will be able to detect common bugs in device
213 drivers like double-freeing of DMA mappings or freeing mappings that
214 were never allocated.
215
Andy Shevchenko70ca7ba2019-02-11 18:12:30 +0200216 This option causes a performance degradation. Use only if you want to
217 debug device drivers and dma interactions.
218
219 If unsure, say N.
220
221config DMA_API_DEBUG_SG
222 bool "Debug DMA scatter-gather usage"
223 default y
224 depends on DMA_API_DEBUG
225 help
226 Perform extra checking that callers of dma_map_sg() have respected the
227 appropriate segment length/boundary limits for the given device when
228 preparing DMA scatterlists.
229
230 This is particularly likely to have been overlooked in cases where the
231 dma_map_sg() API is used for general bulk mapping of pages rather than
232 preparing literal scatter-gather descriptors, where there is a risk of
233 unexpected behaviour from DMA API implementations if the scatterlist
234 is technically out-of-spec.
235
236 If unsure, say N.
Barry Song65789da2020-11-16 19:08:47 +1300237
238config DMA_MAP_BENCHMARK
239 bool "Enable benchmarking of streaming DMA mapping"
240 depends on DEBUG_FS
241 help
242 Provides /sys/kernel/debug/dma_map_benchmark that helps with testing
243 performance of dma_(un)map_page.
244
245 See tools/testing/selftests/dma/dma_map_benchmark.c