// SPDX-License-Identifier: GPL-2.0-only
/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_type {
	XOR_ORION,
	XOR_ARMADA_38X,
	XOR_ARMADA_37XX,
};

enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

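/*
 * Initialize a hardware descriptor: mark it owned by the DMA engine,
 * clear the chain pointer, and request an end-of-descriptor interrupt
 * only when the client asked for one via DMA_PREP_INTERRUPT.
 */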
static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

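/*
 * Encode the operation type (XOR or MEMCPY) in the descriptor itself;
 * only used on non-Orion variants, where the channel runs in
 * XOR_MODE_IN_DESC.
 */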
static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	switch (desc->type) {
	case DMA_XOR:
	case DMA_INTERRUPT:
		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		break;
	case DMA_MEMCPY:
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
		break;
	default:
		BUG();
		return;
	}
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

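/*
 * Bits [5:4] of the activation register encode the channel state;
 * a value of 1 means the channel is busy executing a chain.
 */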
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on a new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

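/*
 * Run the dmaengine completion actions for one finished descriptor:
 * unmap its buffers, invoke the client callback (which must not sleep)
 * and kick off any dependent transactions.
 */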
static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		dma_descriptor_unmap(&desc->async_tx);
		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

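/* Return descriptors that the client has already acked to the free list */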
static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_move_tail(&iter->node, &mv_chan->free_slots);
			if (!list_empty(&iter->sg_tx_list)) {
				list_splice_tail_init(&iter->sg_tx_list,
						      &mv_chan->free_slots);
			}
		}
	}
	return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
		if (!list_empty(&desc->sg_tx_list)) {
			list_splice_tail_init(&desc->sg_tx_list,
					      &mv_chan->completed_slots);
		}
	} else {
		list_move_tail(&desc->node, &mv_chan->free_slots);
		if (!list_empty(&desc->sg_tx_list)) {
			list_splice_tail_init(&desc->sg_tx_list,
					      &mv_chan->free_slots);
		}
	}

	return 0;
}

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock(&chan->lock);
}

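/*
 * Take a descriptor from the free list and pre-ack it. If the free list
 * is empty, schedule the cleanup tasklet to reclaim completed slots and
 * return NULL.
 */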
static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;
	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
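/*
 * Append the descriptor to the software chain, link it behind the old
 * hardware tail and, if the chain was empty or the engine had already
 * stopped at that tail, (re)start the hardware chain.
 */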
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		INIT_LIST_HEAD(&slot->sg_tx_list);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

/*
 * Check if source or destination is a PCIe/IO address (non-SDRAM) and add
 * a new MBus window if necessary. A cache of the programmed windows is
 * kept so that the MMIO-mapped registers don't have to be accessed for
 * every check, which speeds up this process.
 */
static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
{
	struct mv_xor_device *xordev = mv_chan->xordev;
	void __iomem *base = mv_chan->mmr_high_base;
	u32 win_enable;
	u32 size;
	u8 target, attr;
	int ret;
	int i;

	/* Nothing needs to get done for the Armada 3700 */
	if (xordev->xor_type == XOR_ARMADA_37XX)
		return 0;

	/*
	 * Loop over the cached windows to check if the requested area
	 * is already mapped. If that is the case, nothing needs to be
	 * done and we can return.
	 */
	for (i = 0; i < WINDOW_COUNT; i++) {
		if (addr >= xordev->win_start[i] &&
		    addr <= xordev->win_end[i]) {
			/* Window is already mapped */
			return 0;
		}
	}

	/*
	 * The window is not mapped, so we need to create the new mapping
	 */

	/* If no IO window is found, the address has to be located in SDRAM */
	ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
	if (ret < 0)
		return 0;

	/*
	 * Mask the base addr 'addr' according to 'size' read back from the
	 * MBus window. Otherwise we might end up with an address located
	 * somewhere in the middle of this area here.
	 */
	size -= 1;
	addr &= ~size;

	/*
	 * Reading back one of the two enable registers is enough, as they
	 * are always programmed to identical values.
	 */
	win_enable = readl(base + WINDOW_BAR_ENABLE(0));

	/* Set 'i' to the first free window to write the new values to */
	i = ffs(~win_enable) - 1;
	if (i >= WINDOW_COUNT)
		return -ENOMEM;

	writel((addr & 0xffff0000) | (attr << 8) | target,
	       base + WINDOW_BASE(i));
	writel(size & 0xffff0000, base + WINDOW_SIZE(i));

	/* Fill the caching variables for later use */
	xordev->win_start[i] = addr;
	xordev->win_end[i] = addr + size;

	win_enable |= (1 << i);
	win_enable |= 3 << (16 + (2 * i));
	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));

	return 0;
}

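/*
 * Prepare a descriptor that XORs src_cnt source buffers into dest.
 * MBus windows are added on demand for any source or destination
 * address that is not covered by an already programmed window.
 */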
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;
	int ret;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	/* Check if a new window needs to get added for 'dest' */
	ret = mv_xor_add_io_win(mv_chan, dest);
	if (ret)
		return NULL;

	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--) {
			/* Check if a new window needs to get added for 'src' */
			ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
			if (ret)
				return NULL;
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
		}
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

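/*
 * Reclaim all descriptors (chained, completed and allocated) and free
 * the slot memory; warns if descriptors are still in use at that point.
 */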
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);
	WARN_ON(1);
}

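/*
 * Per-channel interrupt handler: report error causes, defer descriptor
 * cleanup to the tasklet and acknowledge the end-of-descriptor/chain
 * interrupt causes.
 */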
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

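/*
 * Channel activation is batched: the engine is only kicked once
 * MV_XOR_THRESHOLD descriptors have been submitted since the last
 * activation.
 */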
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
			       offset_in_page(src), PAGE_SIZE,
			       DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
				offset_in_page(dest), PAGE_SIZE,
				DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

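/*
 * Undo mv_xor_channel_add(): unregister the dmaengine device, release
 * the descriptor pool and dummy-buffer mappings, and free the IRQ.
 */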
static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

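/*
 * Set up one XOR channel: allocate the coherent descriptor pool, map the
 * dummy buffers backing DMA_INTERRUPT, wire up the dmaengine callbacks,
 * request the IRQ, run the memcpy/xor self-tests and finally register
 * the channel with the dmaengine core.
 */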
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;
	if (xordev->xor_type == XOR_ORION)
		mv_chan->op_in_desc = XOR_MODE_IN_REG;
	else
		mv_chan->op_in_desc = XOR_MODE_IN_DESC;

	dma_dev = &mv_chan->dmadev;
	dma_dev->dev = &pdev->dev;
	mv_chan->xordev = xordev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
		       GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_chan_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
	else
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->free_slots);
	INIT_LIST_HEAD(&mv_chan->allocated_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_free_irq;

	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

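/*
 * Program one address decoding window per DRAM chip-select so the XOR
 * engine can reach memory, and prime the win_start/win_end cache used
 * by mv_xor_add_io_win().
 */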
1162static void
Thomas Petazzoni297eedba2012-11-15 15:29:53 +01001163mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
Andrew Lunn63a93322011-12-07 21:48:07 +01001164 const struct mbus_dram_target_info *dram)
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001165{
Ezequiel Garcia82a14022013-10-30 12:01:43 -03001166 void __iomem *base = xordev->xor_high_base;
Saeed Bisharaff7b0472008-07-08 11:58:36 -07001167 u32 win_enable = 0;
1168 int i;
1169
	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		/* Fill the caching variables for later use */
		xordev->win_start[i] = cs->base;
		xordev->win_end[i] = cs->base + cs->size - 1;

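		/*
		 * Bits 0-7 of the BAR enable register enable the
		 * individual windows; the 2-bit fields from bit 16
		 * upwards look like per-window access control, with 0x3
		 * presumably granting full read/write access (an
		 * assumption based on the usual Marvell MBus layout).
		 */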
		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static void
mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}
	/*
	 * For Armada 3700, open a default 4 GB MBus window; the
	 * DRAM-related configuration is done at the AXIS level.
	 */
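	/*
	 * WINDOW_SIZE holds (size - 1) & 0xffff0000 (see
	 * mv_xor_conf_mbus_windows() above), so writing 0xffff0000
	 * selects the full 4 GB.
	 */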
	writel(0xffff0000, base + WINDOW_SIZE(0));
	win_enable |= 1;
	win_enable |= 3 << 16;

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

/*
 * Since this XOR driver is basically used only for RAID5, we don't
 * need to care about synchronizing ->suspend with DMA activity,
 * because the DMA engine will naturally be quiet due to the block
 * devices being suspended.
 */
static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		mv_chan->saved_config_reg =
			readl_relaxed(XOR_CONFIG(mv_chan));
		mv_chan->saved_int_mask_reg =
			readl_relaxed(XOR_INTR_MASK(mv_chan));
	}

	return 0;
}

static int mv_xor_resume(struct platform_device *dev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(dev);
	const struct mbus_dram_target_info *dram;
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		writel_relaxed(mv_chan->saved_config_reg,
			       XOR_CONFIG(mv_chan));
		writel_relaxed(mv_chan->saved_int_mask_reg,
			       XOR_INTR_MASK(mv_chan));
	}

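	/*
	 * Only the per-channel config and interrupt-mask registers were
	 * saved on suspend; the MBus windows are simply re-programmed
	 * from scratch, exactly as at probe time.
	 */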
	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
		return 0;
	}

	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	return 0;
}

static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
	{},
};
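
/*
 * A minimal sketch of a matching device tree node, loosely based on the
 * marvell,orion-xor binding (addresses and interrupt numbers are made up
 * for illustration); the two reg regions map to xor_base and
 * xor_high_base, and each child node describes one channel:
 *
 *	xor@60900 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60900 0x100
 *		       0x60b00 0x100>;
 *		clocks = <&coreclk 0>;
 *
 *		channel0 {
 *			interrupts = <51>;
 *		};
 *		channel1 {
 *			interrupts = <52>;
 *		};
 *	};
 */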

static unsigned int mv_xor_engine_count;

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	unsigned int max_engines, max_channels;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

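	/*
	 * The first MEM resource is the main register window; the second
	 * is the high register bank, which also holds the MBus window
	 * registers programmed by the mv_xor_conf_mbus_windows* helpers
	 * above.
	 */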
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * We need to know which type of XOR device we are dealing with
	 * before setting it up. In the non-DT case it can only be the
	 * legacy Orion one.
	 */
	xordev->xor_type = XOR_ORION;
	if (pdev->dev.of_node) {
		const struct of_device_id *of_id =
			of_match_device(mv_xor_dt_ids,
					&pdev->dev);

		xordev->xor_type = (uintptr_t)of_id->data;
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
	} else {
		dram = mv_mbus_dram_info();
		if (dram)
			mv_xor_conf_mbus_windows(xordev, dram);
	}

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	/*
	 * We don't want more than one channel per CPU, in order for
	 * async_tx to perform well, so we limit the number of engines
	 * and channels accordingly. Note that we also want to use
	 * channels from separate engines when possible. For the
	 * dual-CPU Armada 3700 SoC, which has a single XOR engine,
	 * allow using both of its channels.
	 */
	max_engines = num_present_cpus();
	if (xordev->xor_type == XOR_ARMADA_37XX)
		max_channels = num_present_cpus();
	else
		max_channels = min_t(unsigned int,
				     MV_XOR_MAX_CHANNELS,
				     DIV_ROUND_UP(num_present_cpus(), 2));
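	/*
	 * Worked example (assuming a quad-core SoC with two two-channel
	 * XOR engines, such as Armada XP): max_engines = 4 and
	 * max_channels = min(MV_XOR_MAX_CHANNELS, DIV_ROUND_UP(4, 2)) = 2,
	 * so each engine registers two channels and the four channels
	 * map one-to-one onto the four CPUs.
	 */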

	if (mv_xor_engine_count >= max_engines)
		return 0;

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			if (i >= max_channels)
				continue;

			dma_cap_zero(cap_mask);
			dma_cap_set(DMA_MEMCPY, cap_mask);
			dma_cap_set(DMA_XOR, cap_mask);
			dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < max_channels; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.suspend	= mv_xor_suspend,
	.resume		= mv_xor_resume,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table	= of_match_ptr(mv_xor_dt_ids),
	},
};

builtin_platform_driver(mv_xor_driver);

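/*
 * The driver is registered with builtin_platform_driver() and cannot be
 * built as a module, so the MODULE_* tags below are intentionally left
 * commented out, purely as a record of authorship and license.
 */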
/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
*/