blob: 9ef80a48707eb6583ff51ec278c1a5b035970ba1 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0+
//
// Copyright (C) 2013, Analog Devices Inc.
// Author: Lars-Peter Clausen <lars@metafoo.de>

Lars-Peter Clausen28c44682013-04-15 19:19:50 +02006#include <linux/module.h>
7#include <linux/init.h>
8#include <linux/dmaengine.h>
9#include <linux/slab.h>
10#include <sound/pcm.h>
11#include <sound/pcm_params.h>
12#include <sound/soc.h>
13#include <linux/dma-mapping.h>
14#include <linux/of.h>
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020015
16#include <sound/dmaengine_pcm.h>
17
/*
 * Internal flag: the platform's dmaengine driver cannot report how many
 * bytes of a transfer are still outstanding, so the PCM pointer has to
 * fall back to period-granular counting.
 */
#define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(31)
23
Lars-Peter Clausenc0de42b2013-10-08 15:07:59 +020024static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
25 struct snd_pcm_substream *substream)
26{
27 if (!pcm->chan[substream->stream])
28 return NULL;
29
30 return pcm->chan[substream->stream]->device->dev;
31}
32
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020033/**
34 * snd_dmaengine_pcm_prepare_slave_config() - Generic prepare_slave_config callback
35 * @substream: PCM substream
36 * @params: hw_params
37 * @slave_config: DMA slave config to prepare
38 *
39 * This function can be used as a generic prepare_slave_config callback for
40 * platforms which make use of the snd_dmaengine_dai_dma_data struct for their
41 * DAI DMA data. Internally the function will first call
42 * snd_hwparams_to_dma_slave_config to fill in the slave config based on the
43 * hw_params, followed by snd_dmaengine_set_config_from_dai_data to fill in the
44 * remaining fields based on the DAI DMA data.
45 */
46int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
47 struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
48{
Kuninori Morimoto0ceef682020-07-20 10:17:39 +090049 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020050 struct snd_dmaengine_dai_dma_data *dma_data;
51 int ret;
52
Bard Liao6e1276a2020-02-25 21:39:16 +080053 if (rtd->num_cpus > 1) {
54 dev_err(rtd->dev,
55 "%s doesn't support Multi CPU yet\n", __func__);
56 return -EINVAL;
57 }
58
Kuninori Morimotoc2233a22020-03-30 10:47:37 +090059 dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020060
61 ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
62 if (ret)
63 return ret;
64
65 snd_dmaengine_pcm_set_config_from_dai_data(substream, dma_data,
66 slave_config);
67
68 return 0;
69}
70EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_prepare_slave_config);
71
Kuninori Morimotoece23172019-10-02 14:35:00 +090072static int dmaengine_pcm_hw_params(struct snd_soc_component *component,
73 struct snd_pcm_substream *substream,
74 struct snd_pcm_hw_params *params)
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020075{
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +000076 struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020077 struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
Lars-Peter Clausenfa654e02013-10-08 15:08:00 +020078 int (*prepare_slave_config)(struct snd_pcm_substream *substream,
79 struct snd_pcm_hw_params *params,
80 struct dma_slave_config *slave_config);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020081 struct dma_slave_config slave_config;
82 int ret;
83
Lee Jonesa894bd72013-11-06 10:16:20 +000084 memset(&slave_config, 0, sizeof(slave_config));
85
Lars-Peter Clausenfa654e02013-10-08 15:08:00 +020086 if (!pcm->config)
87 prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
88 else
89 prepare_slave_config = pcm->config->prepare_slave_config;
90
91 if (prepare_slave_config) {
92 ret = prepare_slave_config(substream, params, &slave_config);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020093 if (ret)
94 return ret;
95
96 ret = dmaengine_slave_config(chan, &slave_config);
97 if (ret)
98 return ret;
99 }
100
Takashi Iwaid708c2b2019-12-10 15:26:01 +0100101 return 0;
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200102}
103
Kuninori Morimotoece23172019-10-02 14:35:00 +0900104static int
105dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component,
106 struct snd_pcm_substream *substream)
Lars-Peter Clausenc0de42b2013-10-08 15:07:59 +0200107{
Kuninori Morimoto0ceef682020-07-20 10:17:39 +0900108 struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +0000109 struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
Lars-Peter Clausenc0de42b2013-10-08 15:07:59 +0200110 struct device *dma_dev = dmaengine_dma_dev(pcm, substream);
111 struct dma_chan *chan = pcm->chan[substream->stream];
112 struct snd_dmaengine_dai_dma_data *dma_data;
Lars-Peter Clausenc0de42b2013-10-08 15:07:59 +0200113 struct snd_pcm_hardware hw;
Lars-Peter Clausenc0de42b2013-10-08 15:07:59 +0200114
Bard Liao6e1276a2020-02-25 21:39:16 +0800115 if (rtd->num_cpus > 1) {
116 dev_err(rtd->dev,
117 "%s doesn't support Multi CPU yet\n", __func__);
118 return -EINVAL;
119 }
120
Lars-Peter Clausenfa654e02013-10-08 15:08:00 +0200121 if (pcm->config && pcm->config->pcm_hardware)
Lars-Peter Clausenc0de42b2013-10-08 15:07:59 +0200122 return snd_soc_set_runtime_hwparams(substream,
123 pcm->config->pcm_hardware);
124
Kuninori Morimotoc2233a22020-03-30 10:47:37 +0900125 dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
Lars-Peter Clausenc0de42b2013-10-08 15:07:59 +0200126
127 memset(&hw, 0, sizeof(hw));
128 hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
129 SNDRV_PCM_INFO_INTERLEAVED;
130 hw.periods_min = 2;
131 hw.periods_max = UINT_MAX;
132 hw.period_bytes_min = 256;
133 hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
134 hw.buffer_bytes_max = SIZE_MAX;
135 hw.fifo_size = dma_data->fifo_size;
136
Lars-Peter Clausena22f33b2013-11-30 18:00:45 +0100137 if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
138 hw.info |= SNDRV_PCM_INFO_BATCH;
139
Shengjiu Wang13012802020-01-20 15:28:06 +0800140 /**
141 * FIXME: Remove the return value check to align with the code
142 * before adding snd_dmaengine_pcm_refine_runtime_hwparams
143 * function.
144 */
145 snd_dmaengine_pcm_refine_runtime_hwparams(substream,
146 dma_data,
147 &hw,
148 chan);
Lars-Peter Clausenc0de42b2013-10-08 15:07:59 +0200149
150 return snd_soc_set_runtime_hwparams(substream, &hw);
151}
152
Kuninori Morimotoece23172019-10-02 14:35:00 +0900153static int dmaengine_pcm_open(struct snd_soc_component *component,
154 struct snd_pcm_substream *substream)
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200155{
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +0000156 struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200157 struct dma_chan *chan = pcm->chan[substream->stream];
158 int ret;
159
Kuninori Morimotoece23172019-10-02 14:35:00 +0900160 ret = dmaengine_pcm_set_runtime_hwparams(component, substream);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200161 if (ret)
162 return ret;
163
164 return snd_dmaengine_pcm_open(substream, chan);
165}
166
/* Component close callback: thin wrapper around the generic close. */
static int dmaengine_pcm_close(struct snd_soc_component *component,
	struct snd_pcm_substream *substream)
{
	return snd_dmaengine_pcm_close(substream);
}
172
/* Component trigger callback: forward the trigger command as-is. */
static int dmaengine_pcm_trigger(struct snd_soc_component *component,
	struct snd_pcm_substream *substream, int cmd)
{
	return snd_dmaengine_pcm_trigger(substream, cmd);
}
178
Lars-Peter Clausenc9998362013-04-15 19:19:51 +0200179static struct dma_chan *dmaengine_pcm_compat_request_channel(
Kuninori Morimotoece23172019-10-02 14:35:00 +0900180 struct snd_soc_component *component,
Lars-Peter Clausenc9998362013-04-15 19:19:51 +0200181 struct snd_soc_pcm_runtime *rtd,
182 struct snd_pcm_substream *substream)
183{
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +0000184 struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
Mark Brown90130d22013-10-19 21:38:26 +0100185 struct snd_dmaengine_dai_dma_data *dma_data;
Xiubo Liec4f2852014-01-16 16:08:04 +0800186 dma_filter_fn fn = NULL;
Mark Brown90130d22013-10-19 21:38:26 +0100187
Bard Liao6e1276a2020-02-25 21:39:16 +0800188 if (rtd->num_cpus > 1) {
189 dev_err(rtd->dev,
190 "%s doesn't support Multi CPU yet\n", __func__);
191 return NULL;
192 }
193
Kuninori Morimotoc2233a22020-03-30 10:47:37 +0900194 dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
Lars-Peter Clausenc9998362013-04-15 19:19:51 +0200195
Lars-Peter Clausend1e14062013-04-20 19:29:00 +0200196 if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) && pcm->chan[0])
197 return pcm->chan[0];
198
Xiubo Liec4f2852014-01-16 16:08:04 +0800199 if (pcm->config && pcm->config->compat_request_channel)
Lars-Peter Clausenc9998362013-04-15 19:19:51 +0200200 return pcm->config->compat_request_channel(rtd, substream);
201
Xiubo Liec4f2852014-01-16 16:08:04 +0800202 if (pcm->config)
203 fn = pcm->config->compat_filter_fn;
204
205 return snd_dmaengine_pcm_request_channel(fn, dma_data->filter_data);
Lars-Peter Clausenc9998362013-04-15 19:19:51 +0200206}
207
Lars-Peter Clausenacde50a2015-04-27 12:44:25 +0200208static bool dmaengine_pcm_can_report_residue(struct device *dev,
209 struct dma_chan *chan)
Lars-Peter Clausen478028e2014-01-11 14:02:19 +0100210{
211 struct dma_slave_caps dma_caps;
212 int ret;
213
214 ret = dma_get_slave_caps(chan, &dma_caps);
Lars-Peter Clausenacde50a2015-04-27 12:44:25 +0200215 if (ret != 0) {
216 dev_warn(dev, "Failed to get DMA channel capabilities, falling back to period counting: %d\n",
217 ret);
218 return false;
219 }
Lars-Peter Clausen478028e2014-01-11 14:02:19 +0100220
221 if (dma_caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR)
222 return false;
223
224 return true;
225}
226
/*
 * Component pcm_construct callback: obtain a DMA channel for every
 * present stream direction and attach a managed, preallocated buffer
 * to each substream.
 */
static int dmaengine_pcm_new(struct snd_soc_component *component,
			     struct snd_soc_pcm_runtime *rtd)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	const struct snd_dmaengine_pcm_config *config = pcm->config;
	struct device *dev = component->dev;
	struct snd_pcm_substream *substream;
	size_t prealloc_buffer_size;
	size_t max_buffer_size;
	unsigned int i;

	/* Use platform supplied buffer sizes, else fall back to defaults. */
	if (config && config->prealloc_buffer_size) {
		prealloc_buffer_size = config->prealloc_buffer_size;
		max_buffer_size = config->pcm_hardware->buffer_bytes_max;
	} else {
		prealloc_buffer_size = 512 * 1024;
		max_buffer_size = SIZE_MAX;
	}

	for_each_pcm_streams(i) {
		substream = rtd->pcm->streams[i].substream;
		if (!substream)
			continue;

		/* First try a lookup by the configured channel name. */
		if (!pcm->chan[i] && config && config->chan_names[i])
			pcm->chan[i] = dma_request_slave_channel(dev,
				config->chan_names[i]);

		/* Then, if allowed, fall back to the compat request path. */
		if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) {
			pcm->chan[i] = dmaengine_pcm_compat_request_channel(
				component, rtd, substream);
		}

		if (!pcm->chan[i]) {
			dev_err(component->dev,
				"Missing dma channel for stream: %d\n", i);
			return -EINVAL;
		}

		snd_pcm_set_managed_buffer(substream,
				SNDRV_DMA_TYPE_DEV_IRAM,
				dmaengine_dma_dev(pcm, substream),
				prealloc_buffer_size,
				max_buffer_size);

		/* Without residue support, demote pointer reporting. */
		if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
			pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;

		/* Give the PCM a usable name if none was set yet. */
		if (rtd->pcm->streams[i].pcm->name[0] == '\0') {
			strscpy_pad(rtd->pcm->streams[i].pcm->name,
				    rtd->pcm->streams[i].pcm->id,
				    sizeof(rtd->pcm->streams[i].pcm->name));
		}
	}

	return 0;
}
284
Lars-Peter Clausen93b943e2014-01-11 14:02:18 +0100285static snd_pcm_uframes_t dmaengine_pcm_pointer(
Kuninori Morimotoece23172019-10-02 14:35:00 +0900286 struct snd_soc_component *component,
Lars-Peter Clausen93b943e2014-01-11 14:02:18 +0100287 struct snd_pcm_substream *substream)
288{
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +0000289 struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
Lars-Peter Clausen93b943e2014-01-11 14:02:18 +0100290
291 if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
292 return snd_dmaengine_pcm_pointer_no_residue(substream);
293 else
294 return snd_dmaengine_pcm_pointer(substream);
295}
296
/*
 * Component copy_user callback: move audio data between the user space
 * buffer and the DMA area, optionally running it through the platform
 * supplied process() hook in between.
 */
static int dmaengine_copy_user(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream,
			       int channel, unsigned long hwoff,
			       void __user *buf, unsigned long bytes)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	int (*process)(struct snd_pcm_substream *substream,
		       int channel, unsigned long hwoff,
		       void *buf, unsigned long bytes) = pcm->config->process;
	bool is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	/* Offset of this channel's data within the DMA area. */
	void *dma_ptr = runtime->dma_area + hwoff +
			channel * (runtime->dma_bytes / runtime->channels);
	int ret;

	/* Playback: user data goes into the DMA area first. */
	if (is_playback)
		if (copy_from_user(dma_ptr, buf, bytes))
			return -EFAULT;

	if (process) {
		ret = process(substream, channel, hwoff, (__force void *)buf, bytes);
		if (ret < 0)
			return ret;
	}

	/* Capture: hand the DMA data back to user space afterwards. */
	if (!is_playback)
		if (copy_to_user(buf, dma_ptr, bytes))
			return -EFAULT;

	return 0;
}
328
/* Component ops used when no platform process() hook is configured. */
static const struct snd_soc_component_driver dmaengine_pcm_component = {
	.name		= SND_DMAENGINE_PCM_DRV_NAME,
	.probe_order	= SND_SOC_COMP_ORDER_LATE,
	.open		= dmaengine_pcm_open,
	.close		= dmaengine_pcm_close,
	.hw_params	= dmaengine_pcm_hw_params,
	.trigger	= dmaengine_pcm_trigger,
	.pointer	= dmaengine_pcm_pointer,
	.pcm_construct	= dmaengine_pcm_new,
};
339
/* Component ops variant that routes copies through dmaengine_copy_user. */
static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
	.name		= SND_DMAENGINE_PCM_DRV_NAME,
	.probe_order	= SND_SOC_COMP_ORDER_LATE,
	.open		= dmaengine_pcm_open,
	.close		= dmaengine_pcm_close,
	.hw_params	= dmaengine_pcm_hw_params,
	.trigger	= dmaengine_pcm_trigger,
	.pointer	= dmaengine_pcm_pointer,
	.copy_user	= dmaengine_copy_user,
	.pcm_construct	= dmaengine_pcm_new,
};
351
/* Default channel names used for the by-name DMA channel lookup. */
static const char * const dmaengine_pcm_dma_channel_names[] = {
	[SNDRV_PCM_STREAM_PLAYBACK] = "tx",
	[SNDRV_PCM_STREAM_CAPTURE] = "rx",
};
356
/*
 * Request the per-stream DMA channels by name at registration time.
 * Missing channels are tolerated (a device may support only one
 * direction); only probe deferral is propagated as an error.
 */
static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
	struct device *dev, const struct snd_dmaengine_pcm_config *config)
{
	unsigned int i;
	const char *name;
	struct dma_chan *chan;

	/*
	 * Nothing to do when DT lookup is explicitly disabled, or when
	 * neither the device nor the config supplies an OF node.
	 */
	if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_DT) || (!dev->of_node &&
	    !(config && config->dma_dev && config->dma_dev->of_node)))
		return 0;

	if (config && config->dma_dev) {
		/*
		 * If this warning is seen, it probably means that your Linux
		 * device structure does not match your HW device structure.
		 * It would be best to refactor the Linux device structure to
		 * correctly match the HW structure.
		 */
		dev_warn(dev, "DMA channels sourced from device %s",
			 dev_name(config->dma_dev));
		dev = config->dma_dev;
	}

	for_each_pcm_streams(i) {
		/* Half duplex shares one bidirectional "rx-tx" channel. */
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			name = "rx-tx";
		else
			name = dmaengine_pcm_dma_channel_names[i];
		/* A config supplied name overrides the default. */
		if (config && config->chan_names[i])
			name = config->chan_names[i];
		chan = dma_request_chan(dev, name);
		if (IS_ERR(chan)) {
			/*
			 * Only report probe deferral errors, channels
			 * might not be present for devices that
			 * support only TX or only RX.
			 */
			if (PTR_ERR(chan) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			pcm->chan[i] = NULL;
		} else {
			pcm->chan[i] = chan;
		}
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			break;
	}

	/* In half duplex mode both stream directions use the same channel. */
	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
		pcm->chan[1] = pcm->chan[0];

	return 0;
}
409
/* Release every DMA channel held by the PCM. */
static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
{
	unsigned int i;

	for_each_pcm_streams(i) {
		if (!pcm->chan[i])
			continue;
		dma_release_channel(pcm->chan[i]);
		/* Half duplex: both entries alias one channel, free it once. */
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			break;
	}
}
422
/**
 * snd_dmaengine_pcm_register - Register a dmaengine based PCM device
 * @dev: The parent device for the PCM device
 * @config: Platform specific PCM configuration
 * @flags: Platform specific quirks
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int snd_dmaengine_pcm_register(struct device *dev,
	const struct snd_dmaengine_pcm_config *config, unsigned int flags)
{
	const struct snd_soc_component_driver *driver;
	struct dmaengine_pcm *pcm;
	int ret;

	pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
	if (!pcm)
		return -ENOMEM;

#ifdef CONFIG_DEBUG_FS
	pcm->component.debugfs_prefix = "dma";
#endif
	pcm->config = config;
	pcm->flags = flags;

	ret = dmaengine_pcm_request_chan_of(pcm, dev, config);
	if (ret)
		goto err_free_dma;

	/* Pick the ops variant with the copy_user hook when needed. */
	if (config && config->process)
		driver = &dmaengine_pcm_component_process;
	else
		driver = &dmaengine_pcm_component;

	ret = snd_soc_component_initialize(&pcm->component, driver, dev);
	if (ret)
		goto err_free_dma;

	ret = snd_soc_add_component(&pcm->component, NULL, 0);
	if (ret)
		goto err_free_dma;

	return 0;

err_free_dma:
	dmaengine_pcm_release_chan(pcm);
	kfree(pcm);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
471
472/**
473 * snd_dmaengine_pcm_unregister - Removes a dmaengine based PCM device
474 * @dev: Parent device the PCM was register with
475 *
476 * Removes a dmaengine based PCM device previously registered with
477 * snd_dmaengine_pcm_register.
478 */
479void snd_dmaengine_pcm_unregister(struct device *dev)
480{
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +0000481 struct snd_soc_component *component;
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200482 struct dmaengine_pcm *pcm;
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200483
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +0000484 component = snd_soc_lookup_component(dev, SND_DMAENGINE_PCM_DRV_NAME);
485 if (!component)
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200486 return;
487
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +0000488 pcm = soc_component_to_pcm(component);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200489
Maxime Ripard58f30150f2020-07-07 09:42:37 +0200490 snd_soc_unregister_component_by_driver(dev, component->driver);
Stephen Warren6b9f3e62013-12-03 14:26:33 -0700491 dmaengine_pcm_release_chan(pcm);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200492 kfree(pcm);
493}
494EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);
495
496MODULE_LICENSE("GPL");