blob: f728309a0833b250dea96a147964a416e9bd76e8 [file] [log] [blame]
Kuninori Morimoto1356a602018-07-02 06:25:11 +00001// SPDX-License-Identifier: GPL-2.0+
2//
3// Copyright (C) 2013, Analog Devices Inc.
4// Author: Lars-Peter Clausen <lars@metafoo.de>
5
Lars-Peter Clausen28c44682013-04-15 19:19:50 +02006#include <linux/module.h>
7#include <linux/init.h>
8#include <linux/dmaengine.h>
9#include <linux/slab.h>
10#include <sound/pcm.h>
11#include <sound/pcm_params.h>
12#include <sound/soc.h>
13#include <linux/dma-mapping.h>
14#include <linux/of.h>
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020015
16#include <sound/dmaengine_pcm.h>
17
/*
 * The platform's dmaengine driver does not support reporting the amount of
 * bytes that are still left to transfer. Set internally (see
 * dmaengine_pcm_can_report_residue()); uses a high bit to stay clear of the
 * public SND_DMAENGINE_PCM_FLAG_* values.
 */
#define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(31)
/*
 * Per-device state of a generic dmaengine based PCM. The structure embeds
 * the ASoC component so it can be recovered via container_of() in
 * soc_component_to_pcm().
 */
struct dmaengine_pcm {
	/* One DMA channel per stream direction (playback/capture). */
	struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
	/* Optional platform specific configuration, may be NULL. */
	const struct snd_dmaengine_pcm_config *config;
	/* Embedded ASoC component registered with the core. */
	struct snd_soc_component component;
	/* SND_DMAENGINE_PCM_FLAG_* quirk bits. */
	unsigned int flags;
};
30
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +000031static struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020032{
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +000033 return container_of(p, struct dmaengine_pcm, component);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020034}
35
Lars-Peter Clausenc0de42b2013-10-08 15:07:59 +020036static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
37 struct snd_pcm_substream *substream)
38{
39 if (!pcm->chan[substream->stream])
40 return NULL;
41
42 return pcm->chan[substream->stream]->device->dev;
43}
44
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020045/**
46 * snd_dmaengine_pcm_prepare_slave_config() - Generic prepare_slave_config callback
47 * @substream: PCM substream
48 * @params: hw_params
49 * @slave_config: DMA slave config to prepare
50 *
51 * This function can be used as a generic prepare_slave_config callback for
52 * platforms which make use of the snd_dmaengine_dai_dma_data struct for their
53 * DAI DMA data. Internally the function will first call
54 * snd_hwparams_to_dma_slave_config to fill in the slave config based on the
55 * hw_params, followed by snd_dmaengine_set_config_from_dai_data to fill in the
56 * remaining fields based on the DAI DMA data.
57 */
58int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
59 struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
60{
61 struct snd_soc_pcm_runtime *rtd = substream->private_data;
62 struct snd_dmaengine_dai_dma_data *dma_data;
63 int ret;
64
Bard Liao6e1276a2020-02-25 21:39:16 +080065 if (rtd->num_cpus > 1) {
66 dev_err(rtd->dev,
67 "%s doesn't support Multi CPU yet\n", __func__);
68 return -EINVAL;
69 }
70
Kuninori Morimotoc2233a22020-03-30 10:47:37 +090071 dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020072
73 ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
74 if (ret)
75 return ret;
76
77 snd_dmaengine_pcm_set_config_from_dai_data(substream, dma_data,
78 slave_config);
79
80 return 0;
81}
82EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_prepare_slave_config);
83
Kuninori Morimotoece23172019-10-02 14:35:00 +090084static int dmaengine_pcm_hw_params(struct snd_soc_component *component,
85 struct snd_pcm_substream *substream,
86 struct snd_pcm_hw_params *params)
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020087{
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +000088 struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020089 struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
Lars-Peter Clausenfa654e02013-10-08 15:08:00 +020090 int (*prepare_slave_config)(struct snd_pcm_substream *substream,
91 struct snd_pcm_hw_params *params,
92 struct dma_slave_config *slave_config);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +020093 struct dma_slave_config slave_config;
94 int ret;
95
Lee Jonesa894bd72013-11-06 10:16:20 +000096 memset(&slave_config, 0, sizeof(slave_config));
97
Lars-Peter Clausenfa654e02013-10-08 15:08:00 +020098 if (!pcm->config)
99 prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
100 else
101 prepare_slave_config = pcm->config->prepare_slave_config;
102
103 if (prepare_slave_config) {
104 ret = prepare_slave_config(substream, params, &slave_config);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200105 if (ret)
106 return ret;
107
108 ret = dmaengine_slave_config(chan, &slave_config);
109 if (ret)
110 return ret;
111 }
112
Takashi Iwaid708c2b2019-12-10 15:26:01 +0100113 return 0;
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200114}
115
/*
 * Fill in the runtime hardware description for @substream. If the platform
 * supplied its own snd_pcm_hardware it is used verbatim; otherwise a generic
 * description is built from the DAI DMA data and the DMA device limits, then
 * refined against the channel's actual capabilities.
 */
static int
dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	struct device *dma_dev = dmaengine_dma_dev(pcm, substream);
	struct dma_chan *chan = pcm->chan[substream->stream];
	struct snd_dmaengine_dai_dma_data *dma_data;
	struct snd_pcm_hardware hw;

	/* Only a single CPU DAI is handled so far. */
	if (rtd->num_cpus > 1) {
		dev_err(rtd->dev,
			"%s doesn't support Multi CPU yet\n", __func__);
		return -EINVAL;
	}

	/* A platform provided hardware description takes precedence. */
	if (pcm->config && pcm->config->pcm_hardware)
		return snd_soc_set_runtime_hwparams(substream,
				pcm->config->pcm_hardware);

	dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);

	memset(&hw, 0, sizeof(hw));
	hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
			SNDRV_PCM_INFO_INTERLEAVED;
	hw.periods_min = 2;
	hw.periods_max = UINT_MAX;
	hw.period_bytes_min = 256;
	/* A period must fit into a single DMA segment. */
	hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
	hw.buffer_bytes_max = SIZE_MAX;
	hw.fifo_size = dma_data->fifo_size;

	/* Without residue reporting the pointer is only period-accurate. */
	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
		hw.info |= SNDRV_PCM_INFO_BATCH;

	/*
	 * FIXME: Remove the return value check to align with the code
	 * before adding snd_dmaengine_pcm_refine_runtime_hwparams
	 * function.
	 */
	snd_dmaengine_pcm_refine_runtime_hwparams(substream,
						  dma_data,
						  &hw,
						  chan);

	return snd_soc_set_runtime_hwparams(substream, &hw);
}
164
Kuninori Morimotoece23172019-10-02 14:35:00 +0900165static int dmaengine_pcm_open(struct snd_soc_component *component,
166 struct snd_pcm_substream *substream)
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200167{
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +0000168 struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200169 struct dma_chan *chan = pcm->chan[substream->stream];
170 int ret;
171
Kuninori Morimotoece23172019-10-02 14:35:00 +0900172 ret = dmaengine_pcm_set_runtime_hwparams(component, substream);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200173 if (ret)
174 return ret;
175
176 return snd_dmaengine_pcm_open(substream, chan);
177}
178
/* close callback: thin wrapper around the generic dmaengine PCM close. */
static int dmaengine_pcm_close(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream)
{
	return snd_dmaengine_pcm_close(substream);
}
184
/* trigger callback: thin wrapper around the generic dmaengine PCM trigger. */
static int dmaengine_pcm_trigger(struct snd_soc_component *component,
				 struct snd_pcm_substream *substream, int cmd)
{
	return snd_dmaengine_pcm_trigger(substream, cmd);
}
190
Lars-Peter Clausenc9998362013-04-15 19:19:51 +0200191static struct dma_chan *dmaengine_pcm_compat_request_channel(
Kuninori Morimotoece23172019-10-02 14:35:00 +0900192 struct snd_soc_component *component,
Lars-Peter Clausenc9998362013-04-15 19:19:51 +0200193 struct snd_soc_pcm_runtime *rtd,
194 struct snd_pcm_substream *substream)
195{
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +0000196 struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
Mark Brown90130d22013-10-19 21:38:26 +0100197 struct snd_dmaengine_dai_dma_data *dma_data;
Xiubo Liec4f2852014-01-16 16:08:04 +0800198 dma_filter_fn fn = NULL;
Mark Brown90130d22013-10-19 21:38:26 +0100199
Bard Liao6e1276a2020-02-25 21:39:16 +0800200 if (rtd->num_cpus > 1) {
201 dev_err(rtd->dev,
202 "%s doesn't support Multi CPU yet\n", __func__);
203 return NULL;
204 }
205
Kuninori Morimotoc2233a22020-03-30 10:47:37 +0900206 dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
Lars-Peter Clausenc9998362013-04-15 19:19:51 +0200207
Lars-Peter Clausend1e14062013-04-20 19:29:00 +0200208 if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) && pcm->chan[0])
209 return pcm->chan[0];
210
Xiubo Liec4f2852014-01-16 16:08:04 +0800211 if (pcm->config && pcm->config->compat_request_channel)
Lars-Peter Clausenc9998362013-04-15 19:19:51 +0200212 return pcm->config->compat_request_channel(rtd, substream);
213
Xiubo Liec4f2852014-01-16 16:08:04 +0800214 if (pcm->config)
215 fn = pcm->config->compat_filter_fn;
216
217 return snd_dmaengine_pcm_request_channel(fn, dma_data->filter_data);
Lars-Peter Clausenc9998362013-04-15 19:19:51 +0200218}
219
Lars-Peter Clausenacde50a2015-04-27 12:44:25 +0200220static bool dmaengine_pcm_can_report_residue(struct device *dev,
221 struct dma_chan *chan)
Lars-Peter Clausen478028e2014-01-11 14:02:19 +0100222{
223 struct dma_slave_caps dma_caps;
224 int ret;
225
226 ret = dma_get_slave_caps(chan, &dma_caps);
Lars-Peter Clausenacde50a2015-04-27 12:44:25 +0200227 if (ret != 0) {
228 dev_warn(dev, "Failed to get DMA channel capabilities, falling back to period counting: %d\n",
229 ret);
230 return false;
231 }
Lars-Peter Clausen478028e2014-01-11 14:02:19 +0100232
233 if (dma_caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR)
234 return false;
235
236 return true;
237}
238
/*
 * pcm_construct callback: acquire a DMA channel for every substream of the
 * PCM (by name from the platform config, or via the compat fallback path),
 * preallocate the managed DMA buffer and record whether residue reporting
 * is available. Fails with -EINVAL if any required channel is missing.
 */
static int dmaengine_pcm_new(struct snd_soc_component *component,
			     struct snd_soc_pcm_runtime *rtd)
{
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	const struct snd_dmaengine_pcm_config *config = pcm->config;
	struct device *dev = component->dev;
	struct snd_pcm_substream *substream;
	size_t prealloc_buffer_size;
	size_t max_buffer_size;
	unsigned int i;

	/* Platform supplied sizes win over the 512 KiB default. */
	if (config && config->prealloc_buffer_size) {
		prealloc_buffer_size = config->prealloc_buffer_size;
		max_buffer_size = config->pcm_hardware->buffer_bytes_max;
	} else {
		prealloc_buffer_size = 512 * 1024;
		max_buffer_size = SIZE_MAX;
	}

	for_each_pcm_streams(i) {
		substream = rtd->pcm->streams[i].substream;
		if (!substream)
			continue;

		/* Channel may already be set by dmaengine_pcm_request_chan_of(). */
		if (!pcm->chan[i] && config && config->chan_names[i])
			pcm->chan[i] = dma_request_slave_channel(dev,
				config->chan_names[i]);

		if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) {
			pcm->chan[i] = dmaengine_pcm_compat_request_channel(
				component, rtd, substream);
		}

		if (!pcm->chan[i]) {
			dev_err(component->dev,
				"Missing dma channel for stream: %d\n", i);
			return -EINVAL;
		}

		snd_pcm_set_managed_buffer(substream,
				SNDRV_DMA_TYPE_DEV_IRAM,
				dmaengine_dma_dev(pcm, substream),
				prealloc_buffer_size,
				max_buffer_size);

		if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
			pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;

		/* Give the PCM a name if the core left it empty. */
		if (rtd->pcm->streams[i].pcm->name[0] == '\0') {
			strscpy_pad(rtd->pcm->streams[i].pcm->name,
				    rtd->pcm->streams[i].pcm->id,
				    sizeof(rtd->pcm->streams[i].pcm->name));
		}
	}

	return 0;
}
296
Lars-Peter Clausen93b943e2014-01-11 14:02:18 +0100297static snd_pcm_uframes_t dmaengine_pcm_pointer(
Kuninori Morimotoece23172019-10-02 14:35:00 +0900298 struct snd_soc_component *component,
Lars-Peter Clausen93b943e2014-01-11 14:02:18 +0100299 struct snd_pcm_substream *substream)
300{
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +0000301 struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
Lars-Peter Clausen93b943e2014-01-11 14:02:18 +0100302
303 if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
304 return snd_dmaengine_pcm_pointer_no_residue(substream);
305 else
306 return snd_dmaengine_pcm_pointer(substream);
307}
308
Kuninori Morimotoece23172019-10-02 14:35:00 +0900309static int dmaengine_copy_user(struct snd_soc_component *component,
310 struct snd_pcm_substream *substream,
Olivier Moysan78648092018-02-19 16:00:36 +0100311 int channel, unsigned long hwoff,
Takashi Iwai40d12992018-07-25 22:42:08 +0200312 void __user *buf, unsigned long bytes)
Olivier Moysan78648092018-02-19 16:00:36 +0100313{
Olivier Moysan78648092018-02-19 16:00:36 +0100314 struct snd_pcm_runtime *runtime = substream->runtime;
315 struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
316 int (*process)(struct snd_pcm_substream *substream,
317 int channel, unsigned long hwoff,
318 void *buf, unsigned long bytes) = pcm->config->process;
319 bool is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
320 void *dma_ptr = runtime->dma_area + hwoff +
321 channel * (runtime->dma_bytes / runtime->channels);
322 int ret;
323
324 if (is_playback)
Takashi Iwai40d12992018-07-25 22:42:08 +0200325 if (copy_from_user(dma_ptr, buf, bytes))
Olivier Moysan78648092018-02-19 16:00:36 +0100326 return -EFAULT;
327
328 if (process) {
Takashi Iwai40d12992018-07-25 22:42:08 +0200329 ret = process(substream, channel, hwoff, (__force void *)buf, bytes);
Olivier Moysan78648092018-02-19 16:00:36 +0100330 if (ret < 0)
331 return ret;
332 }
333
334 if (!is_playback)
Takashi Iwai40d12992018-07-25 22:42:08 +0200335 if (copy_to_user(buf, dma_ptr, bytes))
Olivier Moysan78648092018-02-19 16:00:36 +0100336 return -EFAULT;
337
338 return 0;
339}
340
/* Component ops used when the platform has no process() hook. */
static const struct snd_soc_component_driver dmaengine_pcm_component = {
	.name		= SND_DMAENGINE_PCM_DRV_NAME,
	.probe_order	= SND_SOC_COMP_ORDER_LATE,
	.open		= dmaengine_pcm_open,
	.close		= dmaengine_pcm_close,
	.hw_params	= dmaengine_pcm_hw_params,
	.trigger	= dmaengine_pcm_trigger,
	.pointer	= dmaengine_pcm_pointer,
	.pcm_construct	= dmaengine_pcm_new,
};
351
/*
 * Component ops used when the platform supplies a process() hook; identical
 * to dmaengine_pcm_component except for the extra copy_user callback that
 * runs the processing step on each user-space copy.
 */
static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
	.name		= SND_DMAENGINE_PCM_DRV_NAME,
	.probe_order	= SND_SOC_COMP_ORDER_LATE,
	.open		= dmaengine_pcm_open,
	.close		= dmaengine_pcm_close,
	.hw_params	= dmaengine_pcm_hw_params,
	.trigger	= dmaengine_pcm_trigger,
	.pointer	= dmaengine_pcm_pointer,
	.copy_user	= dmaengine_copy_user,
	.pcm_construct	= dmaengine_pcm_new,
};
363
/* Default DT channel names per stream direction. */
static const char * const dmaengine_pcm_dma_channel_names[] = {
	[SNDRV_PCM_STREAM_PLAYBACK] = "tx",
	[SNDRV_PCM_STREAM_CAPTURE] = "rx",
};
368
/*
 * Request DMA channels from the device tree (or from config->dma_dev's
 * node). Missing channels are tolerated and left NULL (the compat path can
 * still fill them in later), but -EPROBE_DEFER is propagated so probing can
 * be retried. For half-duplex only one channel ("rx-tx") is requested and
 * shared between both directions.
 */
static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
	struct device *dev, const struct snd_dmaengine_pcm_config *config)
{
	unsigned int i;
	const char *name;
	struct dma_chan *chan;

	/* Nothing to do without DT opt-in or an of_node to look at. */
	if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_DT) || (!dev->of_node &&
	    !(config && config->dma_dev && config->dma_dev->of_node)))
		return 0;

	if (config && config->dma_dev) {
		/*
		 * If this warning is seen, it probably means that your Linux
		 * device structure does not match your HW device structure.
		 * It would be best to refactor the Linux device structure to
		 * correctly match the HW structure.
		 */
		dev_warn(dev, "DMA channels sourced from device %s",
			 dev_name(config->dma_dev));
		dev = config->dma_dev;
	}

	for_each_pcm_streams(i) {
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			name = "rx-tx";
		else
			name = dmaengine_pcm_dma_channel_names[i];
		/* An explicit config name overrides the default. */
		if (config && config->chan_names[i])
			name = config->chan_names[i];
		chan = dma_request_chan(dev, name);
		if (IS_ERR(chan)) {
			/* Only defer is fatal; otherwise leave the slot empty. */
			if (PTR_ERR(chan) == -EPROBE_DEFER)
				return -EPROBE_DEFER;
			pcm->chan[i] = NULL;
		} else {
			pcm->chan[i] = chan;
		}
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			break;
	}

	/* Half-duplex: both directions alias the single channel. */
	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
		pcm->chan[1] = pcm->chan[0];

	return 0;
}
416
Stephen Warren6b9f3e62013-12-03 14:26:33 -0700417static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
418{
419 unsigned int i;
420
Kuninori Morimotoee10fbe2020-02-17 17:28:32 +0900421 for_each_pcm_streams(i) {
Stephen Warren6b9f3e62013-12-03 14:26:33 -0700422 if (!pcm->chan[i])
423 continue;
424 dma_release_channel(pcm->chan[i]);
425 if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
426 break;
427 }
428}
429
/**
 * snd_dmaengine_pcm_register - Register a dmaengine based PCM device
 * @dev: The parent device for the PCM device
 * @config: Platform specific PCM configuration
 * @flags: Platform specific quirks
 *
 * Allocates the dmaengine_pcm state, requests DT channels up front and
 * registers the matching component variant (with or without the copy_user
 * processing hook). On any failure the channels are released and the state
 * freed. Returns 0 on success or a negative error code.
 */
int snd_dmaengine_pcm_register(struct device *dev,
	const struct snd_dmaengine_pcm_config *config, unsigned int flags)
{
	struct dmaengine_pcm *pcm;
	int ret;

	pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
	if (!pcm)
		return -ENOMEM;

#ifdef CONFIG_DEBUG_FS
	pcm->component.debugfs_prefix = "dma";
#endif
	pcm->config = config;
	pcm->flags = flags;

	ret = dmaengine_pcm_request_chan_of(pcm, dev, config);
	if (ret)
		goto err_free_dma;

	/* With a process() hook the copy_user variant must be used. */
	if (config && config->process)
		ret = snd_soc_add_component(dev, &pcm->component,
					    &dmaengine_pcm_component_process,
					    NULL, 0);
	else
		ret = snd_soc_add_component(dev, &pcm->component,
					    &dmaengine_pcm_component, NULL, 0);
	if (ret)
		goto err_free_dma;

	return 0;

err_free_dma:
	dmaengine_pcm_release_chan(pcm);
	kfree(pcm);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
474
475/**
476 * snd_dmaengine_pcm_unregister - Removes a dmaengine based PCM device
477 * @dev: Parent device the PCM was register with
478 *
479 * Removes a dmaengine based PCM device previously registered with
480 * snd_dmaengine_pcm_register.
481 */
482void snd_dmaengine_pcm_unregister(struct device *dev)
483{
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +0000484 struct snd_soc_component *component;
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200485 struct dmaengine_pcm *pcm;
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200486
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +0000487 component = snd_soc_lookup_component(dev, SND_DMAENGINE_PCM_DRV_NAME);
488 if (!component)
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200489 return;
490
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +0000491 pcm = soc_component_to_pcm(component);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200492
Kuninori Morimotobe7ee5f2018-01-29 02:41:09 +0000493 snd_soc_unregister_component(dev);
Stephen Warren6b9f3e62013-12-03 14:26:33 -0700494 dmaengine_pcm_release_chan(pcm);
Lars-Peter Clausen28c44682013-04-15 19:19:50 +0200495 kfree(pcm);
496}
497EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);
498
499MODULE_LICENSE("GPL");