# SPDX-License-Identifier: GPL-2.0-only

config NO_DMA
	bool

config HAS_DMA
	bool
	depends on !NO_DMA
	default y

config DMA_OPS
	depends on HAS_DMA
	bool

#
# IOMMU drivers that can bypass the IOMMU code and optionally use the direct
# mapping fast path should select this option and set the dma_ops_bypass
# flag in struct device where applicable
#
config DMA_OPS_BYPASS
	bool
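
#
# For context, a minimal and purely illustrative sketch (not taken from any
# in-tree driver) of how an IOMMU driver that selects this option might flip
# the flag once it knows the device can safely use the direct mapping:
#
#	/* example only: "device_is_identity_mapped" is a hypothetical helper */
#	if (device_is_identity_mapped(dev))
#		dev->dma_ops_bypass = true;	/* dma-direct handles future mappings */
#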

# Lets the platform IOMMU driver choose between bypass and IOMMU
config ARCH_HAS_DMA_MAP_DIRECT
	bool

config NEED_SG_DMA_LENGTH
	bool

config NEED_DMA_MAP_STATE
	bool

config ARCH_DMA_ADDR_T_64BIT
	def_bool 64BIT || PHYS_ADDR_T_64BIT

config ARCH_HAS_DMA_SET_MASK
	bool

#
# Select this option if the architecture needs special handling for
# DMA_ATTR_WRITE_COMBINE.  Normally the "uncached" mapping should be what
# people think of when they say write combine, so very few platforms should
# need to enable this.
#
config ARCH_HAS_DMA_WRITE_COMBINE
	bool
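
#
# For reference, a hedged sketch of the driver-side request whose mapping
# this option changes; everything except the DMA API call is illustrative:
#
#	/* example only: ask for a write-combined coherent buffer */
#	buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
#			      DMA_ATTR_WRITE_COMBINE);
#	if (!buf)
#		return -ENOMEM;
#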

#
# Select if the architecture provides the arch_dma_mark_clean hook
#
config ARCH_HAS_DMA_MARK_CLEAN
	bool

config DMA_DECLARE_COHERENT
	bool

config ARCH_HAS_SETUP_DMA_OPS
	bool

config ARCH_HAS_TEARDOWN_DMA_OPS
	bool

config ARCH_HAS_SYNC_DMA_FOR_DEVICE
	bool

config ARCH_HAS_SYNC_DMA_FOR_CPU
	bool
	select NEED_DMA_MAP_STATE

config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
	bool

config ARCH_HAS_DMA_PREP_COHERENT
	bool

config ARCH_HAS_FORCE_DMA_UNENCRYPTED
	bool

config SWIOTLB
	bool
	select NEED_DMA_MAP_STATE

config DMA_RESTRICTED_POOL
	bool "DMA Restricted Pool"
	depends on OF && OF_RESERVED_MEM && SWIOTLB
	help
	  This enables support for restricted DMA pools which provide a level of
	  DMA memory protection on systems with limited hardware protection
	  capabilities, such as those lacking an IOMMU.

	  For more information see
	  <Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt>
	  and <kernel/dma/swiotlb.c>.
	  If unsure, say "n".

#
# Should be selected if we can mmap non-coherent mappings to userspace.
# The only thing that is really required is a way to set an uncached bit
# in the page tables.
#
config DMA_NONCOHERENT_MMAP
	default y if !MMU
	bool
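
#
# For context, a minimal sketch (illustrative names, not in-tree code) of the
# userspace-mapping path that the architecture support above makes possible:
#
#	/* example only: called from a hypothetical driver's .mmap handler */
#	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
#	{
#		struct foo_dev *fd = file->private_data;	/* assumed driver state */
#
#		return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
#					 fd->dma_addr, fd->size);
#	}
#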

config DMA_COHERENT_POOL
	select GENERIC_ALLOCATOR
	bool

config DMA_GLOBAL_POOL
	select DMA_DECLARE_COHERENT
	bool

config DMA_DIRECT_REMAP
	bool
	select DMA_COHERENT_POOL
	select DMA_NONCOHERENT_MMAP

config DMA_CMA
	bool "DMA Contiguous Memory Allocator"
	depends on HAVE_DMA_CONTIGUOUS && CMA
	help
	  This enables the Contiguous Memory Allocator which allows drivers
	  to allocate big physically-contiguous blocks of memory for use with
	  hardware components that support neither I/O mapping nor scatter-gather.

	  You can disable CMA by specifying "cma=0" on the kernel's command
	  line.

	  For more information see <kernel/dma/contiguous.c>.
	  If unsure, say "n".
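
#
# Drivers do not call CMA directly; with this option enabled, large coherent
# allocations such as the hedged sketch below (illustrative sizes and names)
# can be satisfied from the contiguous area:
#
#	/* example only: a multi-megabyte physically contiguous buffer */
#	cpu_addr = dma_alloc_coherent(dev, 4 * SZ_1M, &dma_handle, GFP_KERNEL);
#	if (!cpu_addr)
#		return -ENOMEM;
#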

if DMA_CMA

config DMA_PERNUMA_CMA
	bool "Enable separate DMA Contiguous Memory Area for each NUMA Node"
	default NUMA && ARM64
	help
	  Enable this option to get per-NUMA-node CMA areas so that devices like
	  the ARM64 SMMU can obtain node-local memory through the DMA coherent
	  APIs.

	  You can set the size of the per-NUMA-node CMA areas by specifying
	  "cma_pernuma=size" on the kernel's command line.

comment "Default contiguous memory area size:"

config CMA_SIZE_MBYTES
	int "Size in Mega Bytes"
	depends on !CMA_SIZE_SEL_PERCENTAGE
	default 0 if X86
	default 16
	help
	  Defines the size (in MiB) of the default memory area for the Contiguous
	  Memory Allocator.  If a size of 0 is selected, CMA is disabled by
	  default, but it can be enabled by passing cma=size[MG] to the kernel.


config CMA_SIZE_PERCENTAGE
	int "Percentage of total memory"
	depends on !CMA_SIZE_SEL_MBYTES
	default 0 if X86
	default 10
	help
	  Defines the size of the default memory area for the Contiguous Memory
	  Allocator as a percentage of the total memory in the system.
	  If 0 percent is selected, CMA is disabled by default, but it can be
	  enabled by passing cma=size[MG] to the kernel.

choice
	prompt "Selected region size"
	default CMA_SIZE_SEL_MBYTES

config CMA_SIZE_SEL_MBYTES
	bool "Use mega bytes value only"

config CMA_SIZE_SEL_PERCENTAGE
	bool "Use percentage value only"

config CMA_SIZE_SEL_MIN
	bool "Use lower value (minimum)"

config CMA_SIZE_SEL_MAX
	bool "Use higher value (maximum)"

endchoice

config CMA_ALIGNMENT
	int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
	range 2 12
	default 8
	help
	  The DMA mapping framework by default aligns all buffers to the smallest
	  PAGE_SIZE order which is greater than or equal to the requested buffer
	  size. This works well for buffers up to a few hundred kilobytes, but
	  for larger buffers it is simply a waste of memory. With this parameter
	  you can specify the maximum PAGE_SIZE order for contiguous buffers.
	  Larger buffers will be aligned only to this specified order. The order
	  is expressed as a power of two multiplied by the PAGE_SIZE.

	  For example, if your system defaults to 4KiB pages, the order value
	  of 8 means that the buffers will be aligned up to 1MiB only.

	  If unsure, leave the default value "8".

endif

config DMA_API_DEBUG
	bool "Enable debugging of DMA-API usage"
	select NEED_DMA_MAP_STATE
	help
	  Enable this option to debug the use of the DMA API by device drivers.
	  With this option you will be able to detect common bugs in device
	  drivers like double-freeing of DMA mappings or freeing mappings that
	  were never allocated.

	  This option causes a performance degradation.  Use only if you want to
	  debug device drivers and DMA interactions.

	  If unsure, say N.
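
#
# A hedged illustration (not real driver code) of one error class this option
# reports, an unmap whose size does not match the original mapping:
#
#	dma_addr = dma_map_single(dev, buf, 4096, DMA_TO_DEVICE);
#	if (dma_mapping_error(dev, dma_addr))
#		return -ENOMEM;
#	/* ... device uses the buffer ... */
#	/* BUG: size differs from the mapping; DMA_API_DEBUG warns about this */
#	dma_unmap_single(dev, dma_addr, 2048, DMA_TO_DEVICE);
#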

config DMA_API_DEBUG_SG
	bool "Debug DMA scatter-gather usage"
	default y
	depends on DMA_API_DEBUG
	help
	  Perform extra checking that callers of dma_map_sg() have respected the
	  appropriate segment length/boundary limits for the given device when
	  preparing DMA scatterlists.

	  This is particularly likely to have been overlooked in cases where the
	  dma_map_sg() API is used for general bulk mapping of pages rather than
	  preparing literal scatter-gather descriptors, where there is a risk of
	  unexpected behaviour from DMA API implementations if the scatterlist
	  is technically out-of-spec.

	  If unsure, say N.
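
#
# For context, a sketch (illustrative values) of the limits being checked:
# callers are expected to honour the device's segment size and boundary mask
# before handing a scatterlist to dma_map_sg():
#
#	/* example only: advertise the device's limits, then map the list */
#	dma_set_max_seg_size(dev, SZ_64K);
#	dma_set_seg_boundary(dev, 0xffffffff);
#	nents = dma_map_sg(dev, sgl, n, DMA_FROM_DEVICE);
#	if (!nents)
#		return -EIO;
#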

config DMA_MAP_BENCHMARK
	bool "Enable benchmarking of streaming DMA mapping"
	depends on DEBUG_FS
	help
	  Provides /sys/kernel/debug/dma_map_benchmark that helps with testing the
	  performance of dma_(un)map_page.

	  See tools/testing/selftests/dma/dma_map_benchmark.c