// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for STM32 DMA controller
 *
 * Inspired by dma-jz4740.c and tegra20-apb-dma.c
 *
 * Copyright (C) M'boumba Cedric Madianga 2015
 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define STM32_DMA_LISR			0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR			0x0004 /* DMA High Int Status Reg */
#define STM32_DMA_LIFCR			0x0008 /* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR			0x000c /* DMA High Int Flag Clear Reg */
#define STM32_DMA_TCI			BIT(5) /* Transfer Complete Interrupt */
#define STM32_DMA_HTI			BIT(4) /* Half Transfer Interrupt */
#define STM32_DMA_TEI			BIT(3) /* Transfer Error Interrupt */
#define STM32_DMA_DMEI			BIT(2) /* Direct Mode Error Interrupt */
#define STM32_DMA_FEI			BIT(0) /* FIFO Error Interrupt */
#define STM32_DMA_MASKI			(STM32_DMA_TCI \
					 | STM32_DMA_TEI \
					 | STM32_DMA_DMEI \
					 | STM32_DMA_FEI)

/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x)		(0x0010 + 0x18 * (x)) /* x = 0..7 */
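/* Each stream owns an 0x18-byte register window: e.g. stream 3's SCR is at 0x0010 + 0x18 * 3 = 0x58 */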
#define STM32_DMA_SCR_REQ(n)		((n & 0x7) << 25)
#define STM32_DMA_SCR_MBURST_MASK	GENMASK(24, 23)
#define STM32_DMA_SCR_MBURST(n)		((n & 0x3) << 23)
#define STM32_DMA_SCR_PBURST_MASK	GENMASK(22, 21)
#define STM32_DMA_SCR_PBURST(n)		((n & 0x3) << 21)
#define STM32_DMA_SCR_PL_MASK		GENMASK(17, 16)
#define STM32_DMA_SCR_PL(n)		((n & 0x3) << 16)
#define STM32_DMA_SCR_MSIZE_MASK	GENMASK(14, 13)
#define STM32_DMA_SCR_MSIZE(n)		((n & 0x3) << 13)
#define STM32_DMA_SCR_PSIZE_MASK	GENMASK(12, 11)
#define STM32_DMA_SCR_PSIZE(n)		((n & 0x3) << 11)
#define STM32_DMA_SCR_PSIZE_GET(n)	((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK		GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n)		((n & 0x3) << 6)
#define STM32_DMA_SCR_CT		BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM		BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS		BIT(15) /* Peripheral inc offset size */
#define STM32_DMA_SCR_MINC		BIT(10) /* Memory increment mode */
#define STM32_DMA_SCR_PINC		BIT(9) /* Peripheral increment mode */
#define STM32_DMA_SCR_CIRC		BIT(8) /* Circular mode */
#define STM32_DMA_SCR_PFCTRL		BIT(5) /* Peripheral Flow Controller */
#define STM32_DMA_SCR_TCIE		BIT(4) /* Transfer Complete Int Enable */
#define STM32_DMA_SCR_TEIE		BIT(2) /* Transfer Error Int Enable */
#define STM32_DMA_SCR_DMEIE		BIT(1) /* Direct Mode Err Int Enable */
#define STM32_DMA_SCR_EN		BIT(0) /* Stream Enable */
#define STM32_DMA_SCR_CFG_MASK		(STM32_DMA_SCR_PINC \
					 | STM32_DMA_SCR_MINC \
					 | STM32_DMA_SCR_PINCOS \
					 | STM32_DMA_SCR_PL_MASK)
#define STM32_DMA_SCR_IRQ_MASK		(STM32_DMA_SCR_TCIE \
					 | STM32_DMA_SCR_TEIE \
					 | STM32_DMA_SCR_DMEIE)

/* DMA Stream x number of data register */
#define STM32_DMA_SNDTR(x)		(0x0014 + 0x18 * (x))

/* DMA stream peripheral address register */
#define STM32_DMA_SPAR(x)		(0x0018 + 0x18 * (x))

/* DMA stream x memory 0 address register */
#define STM32_DMA_SM0AR(x)		(0x001c + 0x18 * (x))

/* DMA stream x memory 1 address register */
#define STM32_DMA_SM1AR(x)		(0x0020 + 0x18 * (x))

/* DMA stream x FIFO control register */
#define STM32_DMA_SFCR(x)		(0x0024 + 0x18 * (x))
#define STM32_DMA_SFCR_FTH_MASK		GENMASK(1, 0)
#define STM32_DMA_SFCR_FTH(n)		(n & STM32_DMA_SFCR_FTH_MASK)
#define STM32_DMA_SFCR_FEIE		BIT(7) /* FIFO error interrupt enable */
#define STM32_DMA_SFCR_DMDIS		BIT(2) /* Direct mode disable */
#define STM32_DMA_SFCR_MASK		(STM32_DMA_SFCR_FEIE \
					 | STM32_DMA_SFCR_DMDIS)

/* DMA direction */
#define STM32_DMA_DEV_TO_MEM		0x00
#define STM32_DMA_MEM_TO_DEV		0x01
#define STM32_DMA_MEM_TO_MEM		0x02

/* DMA priority level */
#define STM32_DMA_PRIORITY_LOW		0x00
#define STM32_DMA_PRIORITY_MEDIUM	0x01
#define STM32_DMA_PRIORITY_HIGH		0x02
#define STM32_DMA_PRIORITY_VERY_HIGH	0x03

/* DMA FIFO threshold selection */
#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL	0x00
#define STM32_DMA_FIFO_THRESHOLD_HALFFULL	0x01
#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL	0x02
#define STM32_DMA_FIFO_THRESHOLD_FULL		0x03

#define STM32_DMA_MAX_DATA_ITEMS	0xffff
/*
 * A valid transfer runs from 0 to 0xFFFE data items, which leads to
 * scatter-gather chunks that are unaligned at the boundary. Thus it is
 * safer to round this value down to a multiple of the FIFO size
 * (16 bytes).
 */
#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS	\
	ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
#define STM32_DMA_MAX_CHANNELS		0x08
#define STM32_DMA_MAX_REQUEST_ID	0x08
#define STM32_DMA_MAX_DATA_PARAM	0x03
#define STM32_DMA_FIFO_SIZE		16	/* FIFO is 16 bytes */
#define STM32_DMA_MIN_BURST		4
#define STM32_DMA_MAX_BURST		16

/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK	GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n)	((n) & STM32_DMA_THRESHOLD_FTR_MASK)
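/* bits[1:0] of the DT "features" cell select the FIFO threshold (FTH) */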

enum stm32_dma_width {
	STM32_DMA_BYTE,
	STM32_DMA_HALF_WORD,
	STM32_DMA_WORD,
};

enum stm32_dma_burst_size {
	STM32_DMA_BURST_SINGLE,
	STM32_DMA_BURST_INCR4,
	STM32_DMA_BURST_INCR8,
	STM32_DMA_BURST_INCR16,
};

/**
 * struct stm32_dma_cfg - STM32 DMA custom configuration
 * @channel_id: channel ID
 * @request_line: DMA request
 * @stream_config: 32bit mask specifying the DMA channel configuration
 * @features: 32bit mask specifying the DMA Feature list
 */
struct stm32_dma_cfg {
	u32 channel_id;
	u32 request_line;
	u32 stream_config;
	u32 features;
};

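/* Shadow copy of one stream's register file, programmed into the hardware when a transfer (re)starts */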
struct stm32_dma_chan_reg {
	u32 dma_lisr;
	u32 dma_hisr;
	u32 dma_lifcr;
	u32 dma_hifcr;
	u32 dma_scr;
	u32 dma_sndtr;
	u32 dma_spar;
	u32 dma_sm0ar;
	u32 dma_sm1ar;
	u32 dma_sfcr;
};

struct stm32_dma_sg_req {
	u32 len;
	struct stm32_dma_chan_reg chan_reg;
};

struct stm32_dma_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	u32 num_sgs;
	struct stm32_dma_sg_req sg_req[];
};

struct stm32_dma_chan {
	struct virt_dma_chan vchan;
	bool config_init;
	bool busy;
	u32 id;
	u32 irq;
	struct stm32_dma_desc *desc;
	u32 next_sg;
	struct dma_slave_config dma_sconfig;
	struct stm32_dma_chan_reg chan_reg;
	u32 threshold;
	u32 mem_burst;
	u32 mem_width;
};

struct stm32_dma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	bool mem2mem;
	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
};

static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_dma_device,
			    ddev);
}

static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_dma_chan, vchan.chan);
}

static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_dma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_dma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}

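/* Map a dmaengine bus width onto the 2-bit MSIZE/PSIZE register encoding */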
static int stm32_dma_get_width(struct stm32_dma_chan *chan,
			       enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return STM32_DMA_BYTE;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return STM32_DMA_HALF_WORD;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return STM32_DMA_WORD;
	default:
		dev_err(chan2dev(chan), "DMA bus width not supported\n");
		return -EINVAL;
	}
}

static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
						       u32 threshold)
{
	enum dma_slave_buswidth max_width;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
		max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	else
		max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;

	while ((buf_len < max_width || buf_len % max_width) &&
	       max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
		max_width = max_width >> 1;

	return max_width;
}
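/*
 * e.g. a 6-byte buffer at threshold FULL starts from a 4-byte width, but
 * 6 % 4 != 0, so the loop above halves the width until it divides the
 * length evenly and settles on 2-byte accesses.
 */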

static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
						enum dma_slave_buswidth width)
{
	u32 remaining;

	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		if (burst != 0) {
			/*
			 * If the number of beats fits into a whole number
			 * of bursts, this configuration is allowed.
			 */
			remaining = ((STM32_DMA_FIFO_SIZE / width) *
				     (threshold + 1) / 4) % burst;

			if (remaining == 0)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
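/*
 * e.g. with 16-bit accesses at threshold HALFFULL the threshold amount is
 * (16 / 2) * 2 / 4 = 4 beats: a burst of 4 divides it evenly and is
 * allowed, while a burst of 8 is not.
 */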

static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
	/*
	 * Buffer or period length has to be aligned on FIFO depth.
	 * Otherwise bytes may be left behind in the FIFO at the end of
	 * the buffer or period.
	 */
	return ((buf_len % ((threshold + 1) * 4)) == 0);
}

static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
				    enum dma_slave_buswidth width)
{
	u32 best_burst = max_burst;

	if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
		return 0;

	while ((buf_len < best_burst * width && best_burst > 1) ||
	       !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
						    width)) {
		if (best_burst > STM32_DMA_MIN_BURST)
			best_burst = best_burst >> 1;
		else
			best_burst = 0;
	}

	return best_burst;
}
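/*
 * e.g. for a 32-byte period with 32-bit accesses at threshold FULL, the
 * search above halves 16 -> 8 -> 4 and returns a 4-beat burst: 32 bytes
 * cannot hold a 16-beat burst of words, and 8 beats do not divide the
 * 4-beat threshold amount evenly.
 */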

static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
{
	switch (maxburst) {
	case 0:
	case 1:
		return STM32_DMA_BURST_SINGLE;
	case 4:
		return STM32_DMA_BURST_INCR4;
	case 8:
		return STM32_DMA_BURST_INCR8;
	case 16:
		return STM32_DMA_BURST_INCR16;
	default:
		dev_err(chan2dev(chan), "DMA burst size not supported\n");
		return -EINVAL;
	}
}

static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
				      u32 src_burst, u32 dst_burst)
{
	chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;

	if (!src_burst && !dst_burst) {
		/* Using direct mode */
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
	} else {
		/* Using FIFO mode */
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
	}
}

static int stm32_dma_slave_config(struct dma_chan *c,
				  struct dma_slave_config *config)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	memcpy(&chan->dma_sconfig, config, sizeof(*config));

	chan->config_init = true;

	return 0;
}

static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 flags, dma_isr;

	/*
	 * Read "flags" from DMA_xISR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
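	/* e.g. channel 6 (6 % 4 == 2) maps to HISR bits 16..21 */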

	if (chan->id & 4)
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
	else
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);

	flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	return flags & STM32_DMA_MASKI;
}

static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_ifcr;

	/*
	 * Write "flags" to the DMA_xIFCR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	flags &= STM32_DMA_MASKI;
	dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	if (chan->id & 4)
		stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
	else
		stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
}

static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, id, reg;

	id = chan->id;
	reg = STM32_DMA_SCR(id);
	dma_scr = stm32_dma_read(dmadev, reg);

	if (dma_scr & STM32_DMA_SCR_EN) {
		dma_scr &= ~STM32_DMA_SCR_EN;
		stm32_dma_write(dmadev, reg, dma_scr);

		return readl_relaxed_poll_timeout_atomic(dmadev->base + reg,
					dma_scr, !(dma_scr & STM32_DMA_SCR_EN),
					10, 1000000);
	}

	return 0;
}

static void stm32_dma_stop(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, dma_sfcr, status;
	int ret;

	/* Disable interrupts */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
	dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);

	/* Disable DMA */
	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_dma_irq_clear(chan, status);
	}

	chan->busy = false;
}

static int stm32_dma_terminate_all(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (chan->desc) {
		vchan_terminate_vdesc(&chan->desc->vdesc);
		if (chan->busy)
			stm32_dma_stop(chan);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void stm32_dma_synchronize(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
	u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
	u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
	u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
	u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	dev_dbg(chan2dev(chan), "SCR: 0x%08x\n", scr);
	dev_dbg(chan2dev(chan), "NDTR: 0x%08x\n", ndtr);
	dev_dbg(chan2dev(chan), "SPAR: 0x%08x\n", spar);
	dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
	dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
	dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);

static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_dma_sg_req *sg_req;
	struct stm32_dma_chan_reg *reg;
	u32 status;
	int ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	if (!chan->desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		chan->desc = to_stm32_dma_desc(vdesc);
		chan->next_sg = 0;
	}

	if (chan->next_sg == chan->desc->num_sgs)
		chan->next_sg = 0;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	reg = &sg_req->chan_reg;

	reg->dma_scr &= ~STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);

	chan->next_sg++;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	if (chan->desc->cyclic)
		stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	/* Start DMA */
	reg->dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);

	chan->busy = true;

	dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}

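/*
 * In double buffer mode the stream ping-pongs between SM0AR and SM1AR:
 * while the controller drains one buffer, software may rewrite the other.
 * The CT bit reports which target is currently in use, so the function
 * below updates the memory address register that is not being fetched from.
 */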
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_sm0ar, dma_sm1ar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (dma_scr & STM32_DMA_SCR_DBM) {
		if (chan->next_sg == chan->desc->num_sgs)
			chan->next_sg = 0;

		sg_req = &chan->desc->sg_req[chan->next_sg];

		if (dma_scr & STM32_DMA_SCR_CT) {
			dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
			stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
			dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
		} else {
			dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
			stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
			dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
		}
	}
}

static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
{
	if (chan->desc) {
		if (chan->desc->cyclic) {
			vchan_cyclic_callback(&chan->desc->vdesc);
			chan->next_sg++;
			stm32_dma_configure_next_sg(chan);
		} else {
			chan->busy = false;
			if (chan->next_sg == chan->desc->num_sgs) {
				vchan_cookie_complete(&chan->desc->vdesc);
				chan->desc = NULL;
			}
			stm32_dma_start_transfer(chan);
		}
	}
}

static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
	struct stm32_dma_chan *chan = devid;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 status, scr, sfcr;

	spin_lock(&chan->vchan.lock);

	status = stm32_dma_irq_status(chan);
	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	if (status & STM32_DMA_TCI) {
		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
		if (scr & STM32_DMA_SCR_TCIE)
			stm32_dma_handle_chan_done(chan);
		status &= ~STM32_DMA_TCI;
	}
	if (status & STM32_DMA_HTI) {
		stm32_dma_irq_clear(chan, STM32_DMA_HTI);
		status &= ~STM32_DMA_HTI;
	}
	if (status & STM32_DMA_FEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
		status &= ~STM32_DMA_FEI;
		if (sfcr & STM32_DMA_SFCR_FEIE) {
			if (!(scr & STM32_DMA_SCR_EN))
				dev_err(chan2dev(chan), "FIFO Error\n");
			else
				dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
		}
	}
	if (status) {
		stm32_dma_irq_clear(chan, status);
		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
		if (!(scr & STM32_DMA_SCR_EN))
			dev_err(chan2dev(chan), "chan disabled by HW\n");
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}

static void stm32_dma_issue_pending(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
		dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
		stm32_dma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
				    enum dma_transfer_direction direction,
				    enum dma_slave_buswidth *buswidth,
				    u32 buf_len)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	int src_bus_width, dst_bus_width;
	int src_burst_size, dst_burst_size;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
	u32 dma_scr, threshold;

	src_addr_width = chan->dma_sconfig.src_addr_width;
	dst_addr_width = chan->dma_sconfig.dst_addr_width;
	src_maxburst = chan->dma_sconfig.src_maxburst;
	dst_maxburst = chan->dma_sconfig.dst_maxburst;
	threshold = chan->threshold;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* Set device data size */
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set device burst size */
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  threshold,
							  dst_addr_width);

		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		/* Set memory data size */
		src_addr_width = stm32_dma_get_max_width(buf_len, threshold);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set memory burst size */
		src_maxburst = STM32_DMA_MAX_BURST;
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  threshold,
							  src_addr_width);
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
			STM32_DMA_SCR_PSIZE(dst_bus_width) |
			STM32_DMA_SCR_MSIZE(src_bus_width) |
			STM32_DMA_SCR_PBURST(dst_burst_size) |
			STM32_DMA_SCR_MBURST(src_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
		*buswidth = dst_addr_width;
		break;

	case DMA_DEV_TO_MEM:
		/* Set device data size */
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set device burst size */
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  threshold,
							  src_addr_width);
		chan->mem_burst = src_best_burst;
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		/* Set memory data size */
		dst_addr_width = stm32_dma_get_max_width(buf_len, threshold);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set memory burst size */
		dst_maxburst = STM32_DMA_MAX_BURST;
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  threshold,
							  dst_addr_width);
		chan->mem_burst = dst_best_burst;
		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
			STM32_DMA_SCR_PSIZE(src_bus_width) |
			STM32_DMA_SCR_MSIZE(dst_bus_width) |
			STM32_DMA_SCR_PBURST(src_burst_size) |
			STM32_DMA_SCR_MBURST(dst_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
		*buswidth = chan->dma_sconfig.src_addr_width;
		break;

	default:
		dev_err(chan2dev(chan), "DMA direction is not supported\n");
		return -EINVAL;
	}

	stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);

	/* Set DMA control register */
	chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
			STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
			STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
	chan->chan_reg.dma_scr |= dma_scr;

	return 0;
}

static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
{
	memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
}

static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	u32 sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 nb_data_items;
	int i, ret;

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (sg_len < 1) {
		dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
	if (!desc)
		return NULL;

	/* Set peripheral flow controller */
	if (chan->dma_sconfig.device_fc)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
	else
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
					       sg_dma_len(sg));
		if (ret < 0)
			goto err;

		desc->sg_req[i].len = sg_dma_len(sg);

		nb_data_items = desc->sg_req[i].len / buswidth;
		if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
			dev_err(chan2dev(chan), "nb items not supported\n");
			goto err;
		}

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
	}

	desc->num_sgs = sg_len;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

err:
	kfree(desc);
	return NULL;
}

static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	u32 num_periods, nb_data_items;
	int i, ret;

	if (!buf_len || !period_len) {
		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
		return NULL;
	}

	/*
	 * More requests may be queued as long as the DMA has not been
	 * started; the driver will loop over all of them. Once the DMA is
	 * started, new requests can only be queued after terminating it.
	 */
	if (chan->busy) {
		dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
		return NULL;
	}

	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len);
	if (ret < 0)
		return NULL;

	nb_data_items = period_len / buswidth;
	if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
		dev_err(chan2dev(chan), "number of items not supported\n");
		return NULL;
	}

	/* Enable Circular mode or double buffer mode */
	if (buf_len == period_len)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
	else
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;

	/* Clear periph ctrl if client set it */
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	num_periods = buf_len / period_len;

	desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		desc->sg_req[i].len = period_len;

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
	struct dma_chan *c, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	enum dma_slave_buswidth max_width;
	struct stm32_dma_desc *desc;
	size_t xfer_count, offset;
	u32 num_sgs, best_burst, dma_burst, threshold;
	int i;

	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
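	/* e.g. a 128 KiB copy splits into 65520 + 65520 + 32 byte chunks */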
	desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	threshold = chan->threshold;

	for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
		xfer_count = min_t(size_t, len - offset,
				   STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

		/* Compute best burst size */
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
						      threshold, max_width);
		dma_burst = stm32_dma_get_burst(chan, best_burst);

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr =
			STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
			STM32_DMA_SCR_PBURST(dma_burst) |
			STM32_DMA_SCR_MBURST(dma_burst) |
			STM32_DMA_SCR_MINC |
			STM32_DMA_SCR_PINC |
			STM32_DMA_SCR_TCIE |
			STM32_DMA_SCR_TEIE;
		desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
		desc->sg_req[i].chan_reg.dma_sfcr |=
			STM32_DMA_SFCR_FTH(threshold);
		desc->sg_req[i].chan_reg.dma_spar = src + offset;
		desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
		desc->sg_req[i].len = xfer_count;
	}

	desc->num_sgs = num_sgs;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
{
	u32 dma_scr, width, ndtr;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);

	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
	ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

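	/*
	 * SxNDTR counts PSIZE-wide data items, so convert to bytes:
	 * e.g. NDTR = 0x10 at 32-bit PSIZE is 64 bytes.
	 */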
	return ndtr << width;
}

/**
 * stm32_dma_is_current_sg - check that expected sg_req is currently transferred
 * @chan: dma channel
 *
 * This function, called while IRQs are disabled, checks that the hardware
 * has not switched to the next transfer in double buffer mode. The test is
 * done by comparing the next_sg memory address with the hardware-related
 * register (based on the CT bit value).
 *
 * Returns true if the expected current transfer is still running, or if
 * double buffer mode is not active.
 */
static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_smar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (!(dma_scr & STM32_DMA_SCR_DBM))
		return true;

	sg_req = &chan->desc->sg_req[chan->next_sg];

	if (dma_scr & STM32_DMA_SCR_CT) {
		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
		return (dma_smar == sg_req->chan_reg.dma_sm0ar);
	}

	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));

	return (dma_smar == sg_req->chan_reg.dma_sm1ar);
}

static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
				     struct stm32_dma_desc *desc,
				     u32 next_sg)
{
	u32 modulo, burst_size;
	u32 residue;
	u32 n_sg = next_sg;
	struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
	int i;

	/*
	 * Calculating the residue means combining two pieces of descriptor
	 * information:
	 *  - the sg_req currently being transferred,
	 *  - the hardware's remaining position within this sg (the NDTR
	 *    bit field).
	 *
	 * A race condition may occur if the DMA is running in cyclic or
	 * double buffer mode, since the DMA registers are automatically
	 * reloaded at the end of a period transfer. The hardware may have
	 * switched to the next transfer (CT bit updated) just before the
	 * position (SxNDTR register) is read.
	 * In that case the SxNDTR register may or may not correspond to the
	 * new transfer position rather than the expected one.
	 * The strategy implemented in this driver is to:
	 *  - read the SxNDTR register,
	 *  - crosscheck that the hardware is still in the current transfer.
	 * If a switch has happened, we can assume that the DMA is at the
	 * beginning of the next transfer, and we approximate the residue
	 * accordingly by pointing at the beginning of that transfer.
	 *
	 * This race condition does not apply to non-cyclic mode, as double
	 * buffer mode is not used there and the registers are updated by
	 * software.
	 */

	residue = stm32_dma_get_remaining_bytes(chan);

	if (!stm32_dma_is_current_sg(chan)) {
		n_sg++;
		if (n_sg == chan->desc->num_sgs)
			n_sg = 0;
		residue = sg_req->len;
	}

	/*
	 * In cyclic mode, for the last period the residue is the bytes
	 * remaining in NDTR; for every other period, and in sg mode, it is
	 * the bytes remaining in NDTR plus the remaining periods/sg still
	 * to be transferred.
	 */
	if (!chan->desc->cyclic || n_sg != 0)
		for (i = n_sg; i < desc->num_sgs; i++)
			residue += desc->sg_req[i].len;

	if (!chan->mem_burst)
		return residue;

	burst_size = chan->mem_burst * chan->mem_width;
	modulo = residue % burst_size;
	if (modulo)
		residue = residue - modulo + burst_size;

	return residue;
}

static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE || !state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
		residue = stm32_dma_desc_residue(chan, chan->desc,
						 chan->next_sg);
	else if (vdesc)
		residue = stm32_dma_desc_residue(chan,
						 to_stm32_dma_desc(vdesc), 0);
	dma_set_residue(state, residue);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return status;
}

static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	int ret;

	chan->config_init = false;

	ret = pm_runtime_get_sync(dmadev->ddev.dev);
	if (ret < 0)
		return ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		pm_runtime_put(dmadev->ddev.dev);

	return ret;
}

static void stm32_dma_free_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);

	if (chan->busy) {
		spin_lock_irqsave(&chan->vchan.lock, flags);
		stm32_dma_stop(chan);
		chan->desc = NULL;
		spin_unlock_irqrestore(&chan->vchan.lock, flags);
	}

	pm_runtime_put(dmadev->ddev.dev);

	vchan_free_chan_resources(to_virt_chan(c));
}

static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
}

static void stm32_dma_set_config(struct stm32_dma_chan *chan,
				 struct stm32_dma_cfg *cfg)
{
	stm32_dma_clear_reg(&chan->chan_reg);

	chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);

	/* Enable Interrupts */
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;

	chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
}

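/*
 * Translate a DT DMA specifier into a channel. The binding takes four
 * cells: channel id, request line, a mask applied to the stream
 * configuration register (SCR), and a feature mask holding the FIFO
 * threshold. A client node could therefore look like (illustrative
 * values only; see the st,stm32-dma binding document):
 *
 *	dmas = <&dma1 5 0x4 0x10400 0x1>;
 */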
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct stm32_dma_device *dmadev = ofdma->of_dma_data;
	struct device *dev = dmadev->ddev.dev;
	struct stm32_dma_cfg cfg;
	struct stm32_dma_chan *chan;
	struct dma_chan *c;

	if (dma_spec->args_count < 4) {
		dev_err(dev, "Bad number of cells\n");
		return NULL;
	}

	cfg.channel_id = dma_spec->args[0];
	cfg.request_line = dma_spec->args[1];
	cfg.stream_config = dma_spec->args[2];
	cfg.features = dma_spec->args[3];

	if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
	    cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
		dev_err(dev, "Bad channel and/or request id\n");
		return NULL;
	}

	chan = &dmadev->chan[cfg.channel_id];

	c = dma_get_slave_channel(&chan->vchan.chan);
	if (!c) {
		dev_err(dev, "No more channels available\n");
		return NULL;
	}

	stm32_dma_set_config(chan, &cfg);

	return c;
}

static const struct of_device_id stm32_dma_of_match[] = {
	{ .compatible = "st,stm32-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma_of_match);

static int stm32_dma_probe(struct platform_device *pdev)
{
	struct stm32_dma_chan *chan;
	struct stm32_dma_device *dmadev;
	struct dma_device *dd;
	const struct of_device_id *match;
	struct resource *res;
	struct reset_control *rst;
	int i, ret;

	match = of_match_device(stm32_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dd = &dmadev->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk)) {
		ret = PTR_ERR(dmadev->clk);
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Can't get clock\n");
		return ret;
	}

	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
		return ret;
	}

	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
						"st,mem2mem");

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst)) {
		ret = PTR_ERR(rst);
		if (ret == -EPROBE_DEFER)
			goto clk_free;
	} else {
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	dma_set_max_seg_size(&pdev->dev, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
	dd->device_tx_status = stm32_dma_tx_status;
	dd->device_issue_pending = stm32_dma_issue_pending;
	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
	dd->device_config = stm32_dma_slave_config;
	dd->device_terminate_all = stm32_dma_terminate_all;
	dd->device_synchronize = stm32_dma_synchronize;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
	dd->max_burst = STM32_DMA_MAX_BURST;
	dd->descriptor_reuse = true;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (dmadev->mem2mem) {
		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
		dd->directions |= BIT(DMA_MEM_TO_MEM);
	}

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = stm32_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	ret = dma_async_device_register(dd);
	if (ret)
		goto clk_free;

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		ret = platform_get_irq(pdev, i);
		if (ret < 0)
			goto err_unregister;
		chan->irq = ret;

		ret = devm_request_irq(&pdev->dev, chan->irq,
				       stm32_dma_chan_irq, 0,
				       dev_name(chan2dev(chan)), chan);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_unregister;
		}
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 stm32_dma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 DMA OF registration failed %d\n", ret);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dmadev);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "STM32 DMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);
clk_free:
	clk_disable_unprepare(dmadev->clk);

	return ret;
}

#ifdef CONFIG_PM
static int stm32_dma_runtime_suspend(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);

	clk_disable_unprepare(dmadev->clk);

	return 0;
}

static int stm32_dma_runtime_resume(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(dmadev->clk);
	if (ret) {
		dev_err(dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int stm32_dma_suspend(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
	int id, ret, scr;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;

	for (id = 0; id < STM32_DMA_MAX_CHANNELS; id++) {
		scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
		if (scr & STM32_DMA_SCR_EN) {
			dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
			return -EBUSY;
		}
	}

	pm_runtime_put_sync(dev);

	pm_runtime_force_suspend(dev);

	return 0;
}

static int stm32_dma_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}
#endif

static const struct dev_pm_ops stm32_dma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume)
	SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
			   stm32_dma_runtime_resume, NULL)
};

static struct platform_driver stm32_dma_driver = {
	.driver = {
		.name = "stm32-dma",
		.of_match_table = stm32_dma_of_match,
		.pm = &stm32_dma_pm_ops,
	},
	.probe = stm32_dma_probe,
};

static int __init stm32_dma_init(void)
{
	return platform_driver_register(&stm32_dma_driver);
}
subsys_initcall(stm32_dma_init);