# SPDX-License-Identifier: GPL-2.0-only
# Opt-out flag: when set, HAS_DMA (below) is forced off for this platform.
config NO_DMA
	bool

# DMA support is assumed present (default y) unless the platform sets NO_DMA.
config HAS_DMA
	bool
	depends on !NO_DMA
	default y

config DMA_OPS
	depends on HAS_DMA
	bool

#
# IOMMU drivers that can bypass the IOMMU code and optionally use the direct
# mapping fast path should select this option and set the dma_ops_bypass
# flag in struct device where applicable
#
config DMA_OPS_BYPASS
	bool

# Lets platform IOMMU driver choose between bypass and IOMMU
config ARCH_HAS_DMA_MAP_DIRECT
	bool

config NEED_SG_DMA_LENGTH
	bool

config NEED_DMA_MAP_STATE
	bool

# dma_addr_t is 64-bit wide on 64-bit architectures, or when physical
# addresses are 64-bit even on a 32-bit architecture.
config ARCH_DMA_ADDR_T_64BIT
	def_bool 64BIT || PHYS_ADDR_T_64BIT

config ARCH_HAS_DMA_SET_MASK
	bool

#
# Select this option if the architecture needs special handling for
# DMA_ATTR_WRITE_COMBINE. Normally the "uncached" mapping should be what
# people think of when saying write combine, so very few platforms should
# need to enable this.
#
config ARCH_HAS_DMA_WRITE_COMBINE
	bool

#
# Select if the architecture provides the arch_dma_mark_clean hook
#
config ARCH_HAS_DMA_MARK_CLEAN
	bool

# Capability/hook flags below are selected by the architecture Kconfig files;
# none of them are user-visible.
config DMA_DECLARE_COHERENT
	bool

config ARCH_HAS_SETUP_DMA_OPS
	bool

config ARCH_HAS_TEARDOWN_DMA_OPS
	bool

config ARCH_HAS_SYNC_DMA_FOR_DEVICE
	bool

config ARCH_HAS_SYNC_DMA_FOR_CPU
	bool
	select NEED_DMA_MAP_STATE

config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
	bool

config ARCH_HAS_DMA_PREP_COHERENT
	bool

config ARCH_HAS_FORCE_DMA_UNENCRYPTED
	bool

config SWIOTLB
	bool
	select NEED_DMA_MAP_STATE

config DMA_RESTRICTED_POOL
	bool "DMA Restricted Pool"
	depends on OF && OF_RESERVED_MEM && SWIOTLB
	help
	  This enables support for restricted DMA pools which provide a level of
	  DMA memory protection on systems with limited hardware protection
	  capabilities, such as those lacking an IOMMU.

	  For more information see
	  <Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt>
	  and <kernel/dma/swiotlb.c>.
	  If unsure, say "n".

#
# Should be selected if we can mmap non-coherent mappings to userspace.
# The only thing that is really required is a way to set an uncached bit
# in the pagetables
#
config DMA_NONCOHERENT_MMAP
	default y if !MMU
	bool

config DMA_COHERENT_POOL
	select GENERIC_ALLOCATOR
	bool

config DMA_GLOBAL_POOL
	select DMA_DECLARE_COHERENT
	bool

config DMA_REMAP
	bool
	depends on MMU
	select DMA_NONCOHERENT_MMAP

config DMA_DIRECT_REMAP
	bool
	select DMA_REMAP
	select DMA_COHERENT_POOL

config DMA_CMA
	bool "DMA Contiguous Memory Allocator"
	depends on HAVE_DMA_CONTIGUOUS && CMA
	help
	  This enables the Contiguous Memory Allocator which allows drivers
	  to allocate big physically-contiguous blocks of memory for use with
	  hardware components that do not support I/O map nor scatter-gather.

	  You can disable CMA by specifying "cma=0" on the kernel's command
	  line.

	  For more information see <kernel/dma/contiguous.c>.
	  If unsure, say "n".

if DMA_CMA

config DMA_PERNUMA_CMA
	bool "Enable separate DMA Contiguous Memory Area for each NUMA Node"
	default NUMA && ARM64
	help
	  Enable this option to get pernuma CMA areas so that devices like
	  ARM64 SMMU can get local memory by DMA coherent APIs.

	  You can set the size of pernuma CMA by specifying "cma_pernuma=size"
	  on the kernel's command line.

comment "Default contiguous memory area size:"

config CMA_SIZE_MBYTES
	int "Size in Mega Bytes"
	depends on !CMA_SIZE_SEL_PERCENTAGE
	default 0 if X86
	default 16
	help
	  Defines the size (in MiB) of the default memory area for Contiguous
	  Memory Allocator. If the size of 0 is selected, CMA is disabled by
	  default, but it can be enabled by passing cma=size[MG] to the kernel.

config CMA_SIZE_PERCENTAGE
	int "Percentage of total memory"
	depends on !CMA_SIZE_SEL_MBYTES
	default 0 if X86
	default 10
	help
	  Defines the size of the default memory area for Contiguous Memory
	  Allocator as a percentage of the total memory in the system.
	  If 0 percent is selected, CMA is disabled by default, but it can be
	  enabled by passing cma=size[MG] to the kernel.

choice
	prompt "Selected region size"
	default CMA_SIZE_SEL_MBYTES

config CMA_SIZE_SEL_MBYTES
	bool "Use mega bytes value only"

config CMA_SIZE_SEL_PERCENTAGE
	bool "Use percentage value only"

config CMA_SIZE_SEL_MIN
	bool "Use lower value (minimum)"

config CMA_SIZE_SEL_MAX
	bool "Use higher value (maximum)"

endchoice

config CMA_ALIGNMENT
	int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
	range 2 12
	default 8
	help
	  DMA mapping framework by default aligns all buffers to the smallest
	  PAGE_SIZE order which is greater than or equal to the requested buffer
	  size. This works well for buffers up to a few hundred kilobytes, but
	  for larger buffers it is just a memory waste. With this parameter you
	  can specify the maximum PAGE_SIZE order for contiguous buffers. Larger
	  buffers will be aligned only to this specified order. The order is
	  expressed as a power of two multiplied by the PAGE_SIZE.

	  For example, if your system defaults to 4KiB pages, the order value
	  of 8 means that the buffers will be aligned up to 1MiB only.

	  If unsure, leave the default value "8".

endif

config DMA_API_DEBUG
	bool "Enable debugging of DMA-API usage"
	select NEED_DMA_MAP_STATE
	help
	  Enable this option to debug the use of the DMA API by device drivers.
	  With this option you will be able to detect common bugs in device
	  drivers like double-freeing of DMA mappings or freeing mappings that
	  were never allocated.

	  This option causes a performance degradation. Use only if you want to
	  debug device drivers and dma interactions.

	  If unsure, say N.

config DMA_API_DEBUG_SG
	bool "Debug DMA scatter-gather usage"
	default y
	depends on DMA_API_DEBUG
	help
	  Perform extra checking that callers of dma_map_sg() have respected the
	  appropriate segment length/boundary limits for the given device when
	  preparing DMA scatterlists.

	  This is particularly likely to have been overlooked in cases where the
	  dma_map_sg() API is used for general bulk mapping of pages rather than
	  preparing literal scatter-gather descriptors, where there is a risk of
	  unexpected behaviour from DMA API implementations if the scatterlist
	  is technically out-of-spec.

	  If unsure, say N.

config DMA_MAP_BENCHMARK
	bool "Enable benchmarking of streaming DMA mapping"
	depends on DEBUG_FS
	help
	  Provides /sys/kernel/debug/dma_map_benchmark that helps with testing
	  performance of dma_(un)map_page.

	  See tools/testing/selftests/dma/dma_map_benchmark.c