# SPDX-License-Identifier: GPL-2.0-only

config NO_DMA
	bool

config HAS_DMA
	bool
	depends on !NO_DMA
	default y

config DMA_OPS
	depends on HAS_DMA
	bool

#
# IOMMU drivers that can bypass the IOMMU code and optionally use the direct
# mapping fast path should select this option and set the dma_ops_bypass
# flag in struct device where applicable.
#
config DMA_OPS_BYPASS
	bool

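#
# A minimal sketch (not from this file) of how an IOMMU driver that
# selects DMA_OPS_BYPASS might use it; dma_ops_bypass is the real flag in
# struct device, while the probe hook and the capability check below are
# hypothetical driver-specific names:
#
#	static void my_iommu_probe_finalize(struct device *dev)
#	{
#		/* skip the indirect dma_ops calls when no translation is needed */
#		if (my_iommu_dev_is_identity_mapped(dev))	/* assumed helper */
#			dev->dma_ops_bypass = true;
#	}
#
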
# Lets the platform IOMMU driver choose between bypass and IOMMU
config ARCH_HAS_DMA_MAP_DIRECT
	bool

config NEED_SG_DMA_LENGTH
	bool

config NEED_DMA_MAP_STATE
	bool

config ARCH_DMA_ADDR_T_64BIT
	def_bool 64BIT || PHYS_ADDR_T_64BIT

config ARCH_HAS_DMA_SET_MASK
	bool

#
# Select this option if the architecture needs special handling for
# DMA_ATTR_WRITE_COMBINE. Normally the "uncached" mapping should be what
# people think of when saying write combine, so very few platforms should
# need to enable this.
#
config ARCH_HAS_DMA_WRITE_COMBINE
	bool

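#
# For context, a hedged sketch of the consumer side: a driver requesting a
# write-combined buffer through the generic DMA API ("my_dev" and the size
# are illustrative placeholders):
#
#	dma_addr_t bus_addr;
#	void *vaddr = dma_alloc_wc(my_dev, SZ_1M, &bus_addr, GFP_KERNEL);
#	if (!vaddr)
#		return -ENOMEM;
#	...
#	dma_free_wc(my_dev, SZ_1M, vaddr, bus_addr);
#
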
#
# Select if the architecture provides the arch_dma_mark_clean hook
#
config ARCH_HAS_DMA_MARK_CLEAN
	bool

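#
# For reference, the hook an architecture implements when selecting this
# option has the following shape (prototype as used by the core code; the
# exact header it lives in is not pinned down here):
#
#	void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#
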
config DMA_DECLARE_COHERENT
	bool

config ARCH_HAS_SETUP_DMA_OPS
	bool

config ARCH_HAS_TEARDOWN_DMA_OPS
	bool

config ARCH_HAS_SYNC_DMA_FOR_DEVICE
	bool

config ARCH_HAS_SYNC_DMA_FOR_CPU
	bool
	select NEED_DMA_MAP_STATE

config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
	bool

config ARCH_HAS_DMA_PREP_COHERENT
	bool

config ARCH_HAS_FORCE_DMA_UNENCRYPTED
	bool

config SWIOTLB
	bool
	select NEED_DMA_MAP_STATE

config DMA_RESTRICTED_POOL
	bool "DMA Restricted Pool"
	depends on OF && OF_RESERVED_MEM && SWIOTLB
	help
	  This enables support for restricted DMA pools which provide a level of
	  DMA memory protection on systems with limited hardware protection
	  capabilities, such as those lacking an IOMMU.

	  For more information see
	  <Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt>
	  and <kernel/dma/swiotlb.c>.
	  If unsure, say "n".

#
# Should be selected if we can mmap non-coherent mappings to userspace.
# The only thing that is really required is a way to set an uncached bit
# in the pagetables.
#
config DMA_NONCOHERENT_MMAP
	default y if !MMU
	bool

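#
# As a hedged illustration of that "uncached bit", an mmap path typically
# just tweaks the page protection before remapping (generic kernel helpers,
# shown out of context; "pfn" and "size" are placeholders):
#
#	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#	return remap_pfn_range(vma, vma->vm_start, pfn, size,
#			       vma->vm_page_prot);
#
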
config DMA_COHERENT_POOL
	select GENERIC_ALLOCATOR
	bool

config DMA_REMAP
	bool
	depends on MMU
	select DMA_NONCOHERENT_MMAP

config DMA_DIRECT_REMAP
	bool
	select DMA_REMAP
	select DMA_COHERENT_POOL

config DMA_CMA
	bool "DMA Contiguous Memory Allocator"
	depends on HAVE_DMA_CONTIGUOUS && CMA
	help
	  This enables the Contiguous Memory Allocator, which allows drivers
	  to allocate big physically-contiguous blocks of memory for use with
	  hardware components that do not support I/O mapping or scatter-gather.

	  You can disable CMA by specifying "cma=0" on the kernel's command
	  line.

	  For more information see <kernel/dma/contiguous.c>.
	  If unsure, say "n".

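#
# A hedged sketch of the use case: a driver asking the regular DMA API for
# one large physically contiguous buffer, which DMA_CMA can satisfy
# ("my_dev" and the size are illustrative):
#
#	dma_addr_t dma_handle;
#	void *buf = dma_alloc_coherent(my_dev, SZ_16M, &dma_handle, GFP_KERNEL);
#	if (!buf)
#		return -ENOMEM;
#	...
#	dma_free_coherent(my_dev, SZ_16M, buf, dma_handle);
#
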
if DMA_CMA

config DMA_PERNUMA_CMA
	bool "Enable separate DMA Contiguous Memory Area for each NUMA Node"
	default NUMA && ARM64
	help
	  Enable this option to get per-NUMA-node CMA areas so that devices
	  like the ARM64 SMMU can get node-local memory via the DMA coherent
	  APIs.

	  You can set the size of the per-NUMA CMA by specifying
	  "cma_pernuma=size" on the kernel's command line.

comment "Default contiguous memory area size:"

config CMA_SIZE_MBYTES
	int "Size in Megabytes"
	depends on !CMA_SIZE_SEL_PERCENTAGE
	default 0 if X86
	default 16
	help
	  Defines the size (in MiB) of the default memory area for the
	  Contiguous Memory Allocator. If a size of 0 is selected, CMA is
	  disabled by default, but it can be enabled by passing cma=size[MG]
	  to the kernel.

config CMA_SIZE_PERCENTAGE
	int "Percentage of total memory"
	depends on !CMA_SIZE_SEL_MBYTES
	default 0 if X86
	default 10
	help
	  Defines the size of the default memory area for the Contiguous
	  Memory Allocator as a percentage of the total memory in the system.
	  If 0 percent is selected, CMA is disabled by default, but it can be
	  enabled by passing cma=size[MG] to the kernel.

choice
	prompt "Selected region size"
	default CMA_SIZE_SEL_MBYTES

config CMA_SIZE_SEL_MBYTES
	bool "Use megabytes value only"

config CMA_SIZE_SEL_PERCENTAGE
	bool "Use percentage value only"

config CMA_SIZE_SEL_MIN
	bool "Use lower value (minimum)"

config CMA_SIZE_SEL_MAX
	bool "Use higher value (maximum)"

endchoice

config CMA_ALIGNMENT
	int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
	range 2 12
	default 8
	help
	  The DMA mapping framework by default aligns all buffers to the
	  smallest PAGE_SIZE order which is greater than or equal to the
	  requested buffer size. This works well for buffers up to a few
	  hundred kilobytes, but for larger buffers it is just a waste of
	  memory. With this parameter you can specify the maximum PAGE_SIZE
	  order for contiguous buffers. Larger buffers will be aligned only
	  to this specified order. The order is expressed as a power of two
	  multiplied by the PAGE_SIZE.

	  For example, if your system defaults to 4KiB pages, an order value
	  of 8 means that the buffers will be aligned up to 1MiB only.

	  If unsure, leave the default value "8".

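#
# A hedged sketch of how the cap takes effect: the allocator clamps the
# alignment order to CONFIG_CMA_ALIGNMENT, roughly as in
# kernel/dma/contiguous.c (simplified):
#
#	unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);
#	struct page *page = cma_alloc(cma, size >> PAGE_SHIFT, align, false);
#
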
endif

config DMA_API_DEBUG
	bool "Enable debugging of DMA-API usage"
	select NEED_DMA_MAP_STATE
	help
	  Enable this option to debug the use of the DMA API by device drivers.
	  With this option you will be able to detect common bugs in device
	  drivers, like double-freeing of DMA mappings or freeing mappings that
	  were never allocated.

	  This option causes a performance degradation. Use it only if you
	  want to debug device drivers and DMA interactions.

	  If unsure, say N.

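#
# For illustration, one bug class this option flags, sketched with the
# generic DMA API ("dev", "page" and "len" are placeholders):
#
#	dma_addr_t addr = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
#	...
#	dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
#	dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);	/* double free: reported */
#
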
config DMA_API_DEBUG_SG
	bool "Debug DMA scatter-gather usage"
	default y
	depends on DMA_API_DEBUG
	help
	  Perform extra checking that callers of dma_map_sg() have respected the
	  appropriate segment length/boundary limits for the given device when
	  preparing DMA scatterlists.

	  This is particularly likely to have been overlooked in cases where the
	  dma_map_sg() API is used for general bulk mapping of pages rather than
	  preparing literal scatter-gather descriptors, where there is a risk of
	  unexpected behaviour from DMA API implementations if the scatterlist
	  is technically out-of-spec.

	  If unsure, say N.

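#
# A hedged sketch of the limits being checked: the device declares them up
# front and dma_map_sg() callers are expected to stay within them (generic
# DMA API, "dev" and "sgl" are placeholders):
#
#	dma_set_max_seg_size(dev, SZ_64K);		/* per-segment length limit */
#	dma_set_seg_boundary(dev, DMA_BIT_MASK(32));	/* no segment may cross this */
#	...
#	nents = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
#
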
config DMA_MAP_BENCHMARK
	bool "Enable benchmarking of streaming DMA mapping"
	depends on DEBUG_FS
	help
	  Provides /sys/kernel/debug/dma_map_benchmark that helps with testing
	  performance of dma_(un)map_page.

	  See tools/testing/selftests/dma/dma_map_benchmark.c