/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");

static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(noverify, "Disable random data setup and verification");

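/*
 * Example usage (illustration only; the channel name below is hypothetical
 * and depends on the platform's DMA controller):
 *
 *	modprobe dmatest channel=dma0chan0 timeout=2000 iterations=5 run=1
 *
 * Because the parameters above are declared with S_IWUSR, they can also be
 * changed at runtime through sysfs (typically under
 * /sys/module/dmatest/parameters/), e.g.:
 *
 *	echo dma0chan0 > /sys/module/dmatest/parameters/channel
 */
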
/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 * @noverify:		disable random data setup and data verification
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[20];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
	bool		noverify;
};

/**
 * struct dmatest_info - test information.
 * @params:	test parameters
 * @lock:	access protection to the fields of this structure
 */
static struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	struct mutex		lock;
	bool			did_init;
} test_info = {
	.channels = LIST_HEAD_INIT(test_info.channels),
	.lock = __MUTEX_INITIALIZER(test_info.lock),
};

static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
static struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "Run the test (default: false)");
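
/*
 * A minimal runtime sequence, assuming the module is already loaded and no
 * test threads are currently active (values are illustrative):
 *
 *	echo 1 > /sys/module/dmatest/parameters/iterations
 *	echo 1 > /sys/module/dmatest/parameters/run
 *	cat /sys/module/dmatest/parameters/run
 *
 * Writing "run" while test threads are still active fails with -EBUSY;
 * reading it reports whether any thread is still running and, once all of
 * them have finished, tears the channels down (see dmatest_run_get() and
 * dmatest_run_set() below).
 */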

/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
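
/*
 * Worked example (illustration only, assuming src_off == 0): source byte 3
 * inside the region to be copied is initialized to
 *
 *	PATTERN_SRC | PATTERN_COPY | (~3 & PATTERN_COUNT_MASK) == 0xdc
 *
 * while destination byte 3 outside the copied region is initialized to
 *
 *	PATTERN_DST | (~3 & PATTERN_COUNT_MASK) == 0x1c
 *
 * After a successful transfer, dstbuf[dst_off + 3] must read back 0xdc and
 * srcbuf[3] must still read back 0xdc.
 */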

struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	u8			**srcs;
	u8			**dsts;
	enum dma_transaction_type type;
	bool			done;
};

struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

static bool dmatest_match_channel(struct dmatest_params *params,
		struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
		struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	prandom_bytes(&buf, sizeof(buf));
	return buf;
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_SRC | PATTERN_COPY
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
		buf++;
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
		for ( ; i < start + len; i++)
			buf[i] = PATTERN_DST | PATTERN_OVERWRITE
				| (~i & PATTERN_COUNT_MASK);
		for ( ; i < buf_size; i++)
			buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
	}
}

static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf)
{
	u8		diff = actual ^ pattern;
	u8		expected = pattern | (~counter & PATTERN_COUNT_MASK);
	const char	*thread_name = current->comm;

	if (is_srcbuf)
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warn("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}

static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
			     unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
}

static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
			     unsigned int count)
{
	while (count--)
		dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
}

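/*
 * Pick an odd value no larger than either argument; used below to clamp the
 * number of xor/pq source buffers to what the device supports. For example
 * (illustrative values only), min_odd(3 | 1, 16) == 3 and min_odd(8 | 1, 4) == 3.
 */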
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}

static void result(const char *err, unsigned int n, unsigned int src_off,
		   unsigned int dst_off, unsigned int len, unsigned long data)
{
	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		current->comm, n, err, src_off, dst_off, len, data);
}

static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
		       unsigned int dst_off, unsigned int len,
		       unsigned long data)
{
	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		 current->comm, n, err, src_off, dst_off, len, data);
}
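
/*
 * With the pr_fmt() prefix above, a result line in the kernel log looks
 * roughly like this (thread name and offsets are hypothetical):
 *
 *	dmatest: dma0chan0-copy0: result #1: 'test passed' with src_off=0x180 dst_off=0x80 len=0x3c00 (0)
 */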

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
	struct dmatest_thread	*thread = data;
	struct dmatest_done	done = { .wait = &done_wait };
	struct dmatest_info	*info;
	struct dmatest_params	*params;
	struct dma_chan		*chan;
	struct dma_device	*dev;
	unsigned int		src_off, dst_off, len;
	unsigned int		error_count;
	unsigned int		failed_tests = 0;
	unsigned int		total_tests = 0;
	dma_cookie_t		cookie;
	enum dma_status		status;
	enum dma_ctrl_flags	flags;
	u8			*pq_coefs = NULL;
	int			ret;
	int			src_cnt;
	int			dst_cnt;
	int			i;

	set_freezable();

	ret = -ENOMEM;

	smp_rmb();
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	if (thread->type == DMA_MEMCPY)
		src_cnt = dst_cnt = 1;
	else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst_cnt = 1;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst_cnt = 2;

		pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src_cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->srcs)
		goto err_srcs;
	for (i = 0; i < src_cnt; i++) {
		thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->srcs[i])
			goto err_srcbuf;
	}
	thread->srcs[i] = NULL;

	thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
	if (!thread->dsts)
		goto err_dsts;
	for (i = 0; i < dst_cnt; i++) {
		thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
		if (!thread->dsts[i])
			goto err_dstbuf;
	}
	thread->dsts[i] = NULL;

	set_user_nice(current, 10);

	/*
	 * src and dst buffers are freed by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

	while (!kthread_should_stop()
	       && !(params->iterations && total_tests >= params->iterations)) {
		struct dma_async_tx_descriptor *tx = NULL;
		dma_addr_t dma_srcs[src_cnt];
		dma_addr_t dma_dsts[dst_cnt];
		u8 align = 0;

		total_tests++;

		/* honor alignment restrictions */
		if (thread->type == DMA_MEMCPY)
			align = dev->copy_align;
		else if (thread->type == DMA_XOR)
			align = dev->xor_align;
		else if (thread->type == DMA_PQ)
			align = dev->pq_align;

		if (1 << align > params->buf_size) {
			pr_err("%u-byte buffer too small for %d-byte alignment\n",
			       params->buf_size, 1 << align);
			break;
		}

		if (params->noverify) {
			len = params->buf_size;
			src_off = 0;
			dst_off = 0;
		} else {
			len = dmatest_random() % params->buf_size + 1;
			len = (len >> align) << align;
			if (!len)
				len = 1 << align;
			src_off = dmatest_random() % (params->buf_size - len + 1);
			dst_off = dmatest_random() % (params->buf_size - len + 1);

			src_off = (src_off >> align) << align;
			dst_off = (dst_off >> align) << align;

			dmatest_init_srcs(thread->srcs, src_off, len,
					  params->buf_size);
			dmatest_init_dsts(thread->dsts, dst_off, len,
					  params->buf_size);
		}

		len = (len >> align) << align;
		if (!len)
			len = 1 << align;

		for (i = 0; i < src_cnt; i++) {
			u8 *buf = thread->srcs[i] + src_off;

			dma_srcs[i] = dma_map_single(dev->dev, buf, len,
						     DMA_TO_DEVICE);
			ret = dma_mapping_error(dev->dev, dma_srcs[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, i);
				result("src mapping error", total_tests,
				       src_off, dst_off, len, ret);
				failed_tests++;
				continue;
			}
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		for (i = 0; i < dst_cnt; i++) {
			dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
						     params->buf_size,
						     DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dma_dsts[i]);
			if (ret) {
				unmap_src(dev->dev, dma_srcs, len, src_cnt);
				unmap_dst(dev->dev, dma_dsts, params->buf_size,
					  i);
				result("dst mapping error", total_tests,
				       src_off, dst_off, len, ret);
				failed_tests++;
				continue;
			}
		}

		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dma_dsts[0] + dst_off,
							 dma_srcs[0], len,
							 flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dma_dsts[0] + dst_off,
						      dma_srcs, src_cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			dma_addr_t dma_pq[dst_cnt];

			for (i = 0; i < dst_cnt; i++)
				dma_pq[i] = dma_dsts[i] + dst_off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
						     src_cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			unmap_src(dev->dev, dma_srcs, len, src_cnt);
			unmap_dst(dev->dev, dma_dsts, params->buf_size,
				  dst_cnt);
			result("prep error", total_tests, src_off,
			       dst_off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
		}

		done.done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = &done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			result("submit error", total_tests, src_off,
			       dst_off, len, ret);
			msleep(100);
			failed_tests++;
			continue;
		}
		dma_async_issue_pending(chan);

		wait_event_freezable_timeout(done_wait, done.done,
					     msecs_to_jiffies(params->timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done.done) {
			/*
			 * We're leaving the timed out dma operation with
			 * dangling pointer to done_wait. To make this
			 * correct, we'll need to allocate wait_done for
			 * each test iteration and perform "who's gonna
			 * free it this time?" dancing. For now, just
			 * leave it dangling.
			 */
			result("test timed out", total_tests, src_off, dst_off,
			       len, 0);
			failed_tests++;
			continue;
		} else if (status != DMA_SUCCESS) {
			result(status == DMA_ERROR ?
			       "completion error status" :
			       "completion busy status", total_tests, src_off,
			       dst_off, len, ret);
			failed_tests++;
			continue;
		}

		/* Unmap by myself */
		unmap_src(dev->dev, dma_srcs, len, src_cnt);
		unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);

		if (params->noverify) {
			dbg_result("test passed", total_tests, src_off, dst_off,
				   len, 0);
			continue;
		}

		pr_debug("%s: verifying source buffer...\n", current->comm);
		error_count = dmatest_verify(thread->srcs, 0, src_off,
				0, PATTERN_SRC, true);
		error_count += dmatest_verify(thread->srcs, src_off,
				src_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, true);
		error_count += dmatest_verify(thread->srcs, src_off + len,
				params->buf_size, src_off + len,
				PATTERN_SRC, true);

		pr_debug("%s: verifying dest buffer...\n", current->comm);
		error_count += dmatest_verify(thread->dsts, 0, dst_off,
				0, PATTERN_DST, false);
		error_count += dmatest_verify(thread->dsts, dst_off,
				dst_off + len, src_off,
				PATTERN_SRC | PATTERN_COPY, false);
		error_count += dmatest_verify(thread->dsts, dst_off + len,
				params->buf_size, dst_off + len,
				PATTERN_DST, false);

		if (error_count) {
			result("data error", total_tests, src_off, dst_off,
			       len, error_count);
			failed_tests++;
		} else {
			dbg_result("test passed", total_tests, src_off, dst_off,
				   len, 0);
		}
	}

	ret = 0;
	for (i = 0; thread->dsts[i]; i++)
		kfree(thread->dsts[i]);
err_dstbuf:
	kfree(thread->dsts);
err_dsts:
	for (i = 0; thread->srcs[i]; i++)
		kfree(thread->srcs[i]);
err_srcbuf:
	kfree(thread->srcs);
err_srcs:
	kfree(pq_coefs);
err_thread_type:
	pr_info("%s: terminating after %u tests, %u failures (status %d)\n",
		current->comm, total_tests, failed_tests, ret);

	/* terminate all transfers on specified channels */
	if (ret)
		dmaengine_terminate_all(chan);

	thread->done = true;

	if (params->iterations > 0)
		while (!kthread_should_stop()) {
			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
			interruptible_sleep_on(&wait_dmatest_exit);
		}

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread	*thread;
	struct dmatest_thread	*_thread;
	int			ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("thread %s exited with status %d\n",
			 thread->task->comm, ret);
		list_del(&thread->node);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_all(dtc->chan);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warn("No memory for %s-%s%u\n",
				dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		smp_wmb();
		thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warn("Failed to run thread %s-%s%u\n",
				dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */

		list_add_tail(&thread->node, &dtc->threads);
	}

	return i;
}

static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warn("No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("Started %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}

static bool filter(struct dma_chan *chan, void *param)
{
	struct dmatest_params *params = param;

	if (!dmatest_match_channel(params, chan) ||
	    !dmatest_match_device(params, chan->device))
		return false;
	else
		return true;
}

static void request_channels(struct dmatest_info *info,
			     enum dma_transaction_type type)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(type, mask);
	for (;;) {
		struct dmatest_params *params = &info->params;
		struct dma_chan *chan;

		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			if (dmatest_add_channel(info, chan)) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
}

static void run_threaded_test(struct dmatest_info *info)
{
	struct dmatest_params *params = &info->params;

	/* Copy test parameters */
	params->buf_size = test_buf_size;
	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
	strlcpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;
	params->noverify = noverify;

	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_PQ);
}

static void stop_threaded_test(struct dmatest_info *info)
{
	struct dmatest_chan *dtc, *_dtc;
	struct dma_chan *chan;

	list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
		list_del(&dtc->node);
		chan = dtc->chan;
		dmatest_cleanup_channel(dtc);
		pr_debug("dropped channel %s\n", dma_chan_name(chan));
		dma_release_channel(chan);
	}

	info->nr_channels = 0;
}

static void restart_threaded_test(struct dmatest_info *info, bool run)
{
	/* we might be called early to set run=, defer running until all
	 * parameters have been evaluated
	 */
	if (!info->did_init)
		return;

	/* Stop any running test first */
	stop_threaded_test(info);

	/* Run test with new parameters */
	run_threaded_test(info);
}

static bool is_threaded_test_run(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done)
				return true;
		}
	}

	return false;
}

static int dmatest_run_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	if (is_threaded_test_run(info)) {
		dmatest_run = true;
	} else {
		stop_threaded_test(info);
		dmatest_run = false;
	}
	mutex_unlock(&info->lock);

	return param_get_bool(val, kp);
}

static int dmatest_run_set(const char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	int ret;

	mutex_lock(&info->lock);
	ret = param_set_bool(val, kp);
	if (ret) {
		mutex_unlock(&info->lock);
		return ret;
	}

	if (is_threaded_test_run(info))
		ret = -EBUSY;
	else if (dmatest_run)
		restart_threaded_test(info, dmatest_run);

	mutex_unlock(&info->lock);

	return ret;
}

static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;

	if (dmatest_run) {
		mutex_lock(&info->lock);
		run_threaded_test(info);
		mutex_unlock(&info->lock);
	}

	/* module parameters are stable, inittime tests are started,
	 * let userspace take over 'run' control
	 */
	info->did_init = true;

	return 0;
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");