// SPDX-License-Identifier: (GPL-2.0 OR MIT)
//
// Copyright (c) 2018 BayLibre, SAS.
// Author: Jerome Brunet <jbrunet@baylibre.com>

#include <linux/clk.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>

#include "axg-fifo.h"

/*
 * This file implements the platform operations common to the playback and
 * capture frontend DAIs. The logic behind these two types of FIFO is very
 * similar, but some differences exist.
 * These differences are handled in the respective DAI drivers.
 */

static struct snd_pcm_hardware axg_fifo_hw = {
	.info = (SNDRV_PCM_INFO_INTERLEAVED |
		 SNDRV_PCM_INFO_MMAP |
		 SNDRV_PCM_INFO_MMAP_VALID |
		 SNDRV_PCM_INFO_BLOCK_TRANSFER |
		 SNDRV_PCM_INFO_PAUSE),

	.formats = AXG_FIFO_FORMATS,
	.rate_min = 5512,
	.rate_max = 192000,
	.channels_min = 1,
	.channels_max = AXG_FIFO_CH_MAX,
	.period_bytes_min = AXG_FIFO_MIN_DEPTH,
	.period_bytes_max = UINT_MAX,
	.periods_min = 2,
	.periods_max = UINT_MAX,

	/* No real justification for this */
	.buffer_bytes_max = 1 * 1024 * 1024,
};

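/* Helpers to get the cpu DAI, fifo driver data and device from a substream */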
static struct snd_soc_dai *axg_fifo_dai(struct snd_pcm_substream *ss)
{
	struct snd_soc_pcm_runtime *rtd = ss->private_data;

	return rtd->cpu_dai;
}

static struct axg_fifo *axg_fifo_data(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return snd_soc_dai_get_drvdata(dai);
}

static struct device *axg_fifo_dev(struct snd_pcm_substream *ss)
{
	struct snd_soc_dai *dai = axg_fifo_dai(ss);

	return dai->dev;
}

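/* Start or stop the transfers between memory and the fifo */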
static void __dma_enable(struct axg_fifo *fifo, bool enable)
{
	regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN,
			   enable ? CTRL0_DMA_EN : 0);
}

static int axg_fifo_pcm_trigger(struct snd_pcm_substream *ss, int cmd)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		__dma_enable(fifo, true);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_STOP:
		__dma_enable(fifo, false);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

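/*
 * STATUS2 is set up in axg_fifo_pcm_open() to report the current memory
 * pointer, so the buffer position is the offset of that pointer from the
 * dma address of the runtime buffer.
 */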
static snd_pcm_uframes_t axg_fifo_pcm_pointer(struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct snd_pcm_runtime *runtime = ss->runtime;
	unsigned int addr;

	regmap_read(fifo->map, FIFO_STATUS2, &addr);

	return bytes_to_frames(runtime, addr - (unsigned int)runtime->dma_addr);
}

static int axg_fifo_pcm_hw_params(struct snd_pcm_substream *ss,
				  struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = ss->runtime;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	dma_addr_t end_ptr;
	unsigned int burst_num;
	int ret;

	ret = snd_pcm_lib_malloc_pages(ss, params_buffer_bytes(params));
	if (ret < 0)
		return ret;

	/* Setup dma memory pointers */
	end_ptr = runtime->dma_addr + runtime->dma_bytes - AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_START_ADDR, runtime->dma_addr);
	regmap_write(fifo->map, FIFO_FINISH_ADDR, end_ptr);

	/* Setup interrupt periodicity */
	burst_num = params_period_bytes(params) / AXG_FIFO_BURST;
	regmap_write(fifo->map, FIFO_INT_ADDR, burst_num);

	/* Enable block count irq */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT),
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT));

	return 0;
}

static int axg_fifo_pcm_hw_free(struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);

	/* Disable the block count irq */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_COUNT_REPEAT), 0);

	return snd_pcm_lib_free_pages(ss);
}

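/* Acknowledge interrupts by raising and then clearing the CTRL1 clear bits */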
static void axg_fifo_ack_irq(struct axg_fifo *fifo, u8 mask)
{
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   CTRL1_INT_CLR(mask));

	/* The clear bits are not self-clearing, so reset them as well */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_INT_CLR(FIFO_INT_MASK),
			   0);
}

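/* Block count irq handler: notify ALSA that a period has elapsed */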
static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
{
	struct snd_pcm_substream *ss = dev_id;
	struct axg_fifo *fifo = axg_fifo_data(ss);
	unsigned int status;

	regmap_read(fifo->map, FIFO_STATUS1, &status);

	status = STATUS1_INT_STS(status) & FIFO_INT_MASK;
	if (status & FIFO_INT_COUNT_REPEAT)
		snd_pcm_period_elapsed(ss);
	else
		dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
			status);

	/* Ack irqs */
	axg_fifo_ack_irq(fifo, status);

	return IRQ_RETVAL(status);
}

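/*
 * On open: apply the hardware constraints, request the irq, enable the
 * peripheral clock and put the fifo in a known state, with irqs and dma
 * disabled, before releasing the memory arbiter from reset.
 */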
static int axg_fifo_pcm_open(struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	struct device *dev = axg_fifo_dev(ss);
	int ret;

	snd_soc_set_runtime_hwparams(ss, &axg_fifo_hw);

	/*
	 * Make sure the buffer and period sizes are a multiple of the fifo
	 * minimum depth
	 */
	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					 AXG_FIFO_MIN_DEPTH);
	if (ret)
		return ret;

	ret = snd_pcm_hw_constraint_step(ss->runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
					 AXG_FIFO_MIN_DEPTH);
	if (ret)
		return ret;

	ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
			  dev_name(dev), ss);
	if (ret)
		return ret;

	/* Enable pclk to access registers and clock the fifo ip */
	ret = clk_prepare_enable(fifo->pclk);
	if (ret)
		return ret;

	/* Setup status2 so it reports the memory pointer */
	regmap_update_bits(fifo->map, FIFO_CTRL1,
			   CTRL1_STATUS2_SEL_MASK,
			   CTRL1_STATUS2_SEL(STATUS2_SEL_DDR_READ));

	/* Make sure the dma is initially disabled */
	__dma_enable(fifo, false);

	/* Disable irqs until params are ready */
	regmap_update_bits(fifo->map, FIFO_CTRL0,
			   CTRL0_INT_EN(FIFO_INT_MASK), 0);

	/* Clear any pending interrupt */
	axg_fifo_ack_irq(fifo, FIFO_INT_MASK);

	/* Take the memory arbiter out of reset */
	ret = reset_control_deassert(fifo->arb);
	if (ret)
		clk_disable_unprepare(fifo->pclk);

	return ret;
}

static int axg_fifo_pcm_close(struct snd_pcm_substream *ss)
{
	struct axg_fifo *fifo = axg_fifo_data(ss);
	int ret;

	/* Put the memory arbiter back in reset */
	ret = reset_control_assert(fifo->arb);

	/* Disable fifo ip and register access */
	clk_disable_unprepare(fifo->pclk);

	/* Remove the irq handler */
	free_irq(fifo->irq, ss);

	return ret;
}

const struct snd_pcm_ops axg_fifo_pcm_ops = {
	.open = axg_fifo_pcm_open,
	.close = axg_fifo_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = axg_fifo_pcm_hw_params,
	.hw_free = axg_fifo_pcm_hw_free,
	.pointer = axg_fifo_pcm_pointer,
	.trigger = axg_fifo_pcm_trigger,
};
EXPORT_SYMBOL_GPL(axg_fifo_pcm_ops);

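/* Preallocate the dma buffer (up to buffer_bytes_max) for the requested stream direction */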
int axg_fifo_pcm_new(struct snd_soc_pcm_runtime *rtd, unsigned int type)
{
	struct snd_card *card = rtd->card->snd_card;
	size_t size = axg_fifo_hw.buffer_bytes_max;

	return snd_pcm_lib_preallocate_pages(rtd->pcm->streams[type].substream,
					     SNDRV_DMA_TYPE_DEV, card->dev,
					     size, size);
}
EXPORT_SYMBOL_GPL(axg_fifo_pcm_new);

static const struct regmap_config axg_fifo_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = FIFO_STATUS2,
};

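/*
 * Common probe: map the fifo registers, get the peripheral clock, the
 * memory arbiter reset line and the irq, then register the component
 * provided by the specific fifo driver (playback or capture).
 */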
int axg_fifo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct axg_fifo_match_data *data;
	struct axg_fifo *fifo;
	struct resource *res;
	void __iomem *regs;

	data = of_device_get_match_data(dev);
	if (!data) {
		dev_err(dev, "failed to match device\n");
		return -ENODEV;
	}

	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;
	platform_set_drvdata(pdev, fifo);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	fifo->map = devm_regmap_init_mmio(dev, regs, &axg_fifo_regmap_cfg);
	if (IS_ERR(fifo->map)) {
		dev_err(dev, "failed to init regmap: %ld\n",
			PTR_ERR(fifo->map));
		return PTR_ERR(fifo->map);
	}

	fifo->pclk = devm_clk_get(dev, NULL);
	if (IS_ERR(fifo->pclk)) {
		if (PTR_ERR(fifo->pclk) != -EPROBE_DEFER)
			dev_err(dev, "failed to get pclk: %ld\n",
				PTR_ERR(fifo->pclk));
		return PTR_ERR(fifo->pclk);
	}

	fifo->arb = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(fifo->arb)) {
		if (PTR_ERR(fifo->arb) != -EPROBE_DEFER)
			dev_err(dev, "failed to get arb reset: %ld\n",
				PTR_ERR(fifo->arb));
		return PTR_ERR(fifo->arb);
	}

	fifo->irq = of_irq_get(dev->of_node, 0);
	if (fifo->irq <= 0) {
		dev_err(dev, "failed to get irq: %d\n", fifo->irq);
		return fifo->irq;
	}

	return devm_snd_soc_register_component(dev, data->component_drv,
					       data->dai_drv, 1);
}
EXPORT_SYMBOL_GPL(axg_fifo_probe);

MODULE_DESCRIPTION("Amlogic AXG fifo driver");
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_LICENSE("GPL v2");