blob: 54d581d407aa72077a3ab7b1feaefd13b88d7cc5 [file] [log] [blame]
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -07001/*
2 * DMA Engine test module
3 *
4 * Copyright (C) 2007 Atmel Corporation
Andy Shevchenko851b7e12013-03-04 11:09:30 +02005 * Copyright (C) 2013 Intel Corporation
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -07006 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
Dan Williams872f05c2013-11-06 16:29:58 -080011#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -070013#include <linux/delay.h>
Alexey Dobriyanb7f080c2011-06-16 11:01:34 +000014#include <linux/dma-mapping.h>
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -070015#include <linux/dmaengine.h>
Guennadi Liakhovetski981ed702011-08-18 16:50:51 +020016#include <linux/freezer.h>
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -070017#include <linux/init.h>
18#include <linux/kthread.h>
Ingo Molnar0881e7b2017-02-05 15:30:50 +010019#include <linux/sched/task.h>
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -070020#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/random.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090023#include <linux/slab.h>
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -070024#include <linux/wait.h>
25
26static unsigned int test_buf_size = 16384;
Andy Shevchenkoa6c268d2013-07-23 18:36:46 +030027module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -070028MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
29
Kay Sievers06190d82008-11-11 13:12:33 -070030static char test_channel[20];
Andy Shevchenkoa6c268d2013-07-23 18:36:46 +030031module_param_string(channel, test_channel, sizeof(test_channel),
32 S_IRUGO | S_IWUSR);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -070033MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
34
Guennadi Liakhovetskia85159f2013-12-30 14:58:04 +010035static char test_device[32];
Andy Shevchenkoa6c268d2013-07-23 18:36:46 +030036module_param_string(device, test_device, sizeof(test_device),
37 S_IRUGO | S_IWUSR);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -070038MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
39
40static unsigned int threads_per_chan = 1;
Andy Shevchenkoa6c268d2013-07-23 18:36:46 +030041module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -070042MODULE_PARM_DESC(threads_per_chan,
43 "Number of threads to start per channel (default: 1)");
44
45static unsigned int max_channels;
Andy Shevchenkoa6c268d2013-07-23 18:36:46 +030046module_param(max_channels, uint, S_IRUGO | S_IWUSR);
Dan Williams33df8ca2009-01-06 11:38:15 -070047MODULE_PARM_DESC(max_channels,
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -070048 "Maximum number of channels to use (default: all)");
49
Nicolas Ferre0a2ff57d2009-07-03 19:26:51 +020050static unsigned int iterations;
Andy Shevchenkoa6c268d2013-07-23 18:36:46 +030051module_param(iterations, uint, S_IRUGO | S_IWUSR);
Nicolas Ferre0a2ff57d2009-07-03 19:26:51 +020052MODULE_PARM_DESC(iterations,
53 "Iterations before stopping test (default: infinite)");
54
Kedareswara rao Appanaa0d4cb42016-06-09 21:10:14 +053055static unsigned int sg_buffers = 1;
56module_param(sg_buffers, uint, S_IRUGO | S_IWUSR);
57MODULE_PARM_DESC(sg_buffers,
58 "Number of scatter gather buffers (default: 1)");
59
Eugeniy Paltsevd8646722016-09-14 20:40:38 +030060static unsigned int dmatest;
Kedareswara rao Appanaa0d4cb42016-06-09 21:10:14 +053061module_param(dmatest, uint, S_IRUGO | S_IWUSR);
62MODULE_PARM_DESC(dmatest,
Eugeniy Paltsevd8646722016-09-14 20:40:38 +030063 "dmatest 0-memcpy 1-slave_sg (default: 0)");
Kedareswara rao Appanaa0d4cb42016-06-09 21:10:14 +053064
Dan Williamsb54d5cb2009-03-25 09:13:25 -070065static unsigned int xor_sources = 3;
Andy Shevchenkoa6c268d2013-07-23 18:36:46 +030066module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
Dan Williamsb54d5cb2009-03-25 09:13:25 -070067MODULE_PARM_DESC(xor_sources,
68 "Number of xor source buffers (default: 3)");
69
Dan Williams58691d62009-08-29 19:09:27 -070070static unsigned int pq_sources = 3;
Andy Shevchenkoa6c268d2013-07-23 18:36:46 +030071module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
Dan Williams58691d62009-08-29 19:09:27 -070072MODULE_PARM_DESC(pq_sources,
73 "Number of p+q source buffers (default: 3)");
74
Viresh Kumard42efe62011-03-22 17:27:25 +053075static int timeout = 3000;
Andy Shevchenkoa6c268d2013-07-23 18:36:46 +030076module_param(timeout, uint, S_IRUGO | S_IWUSR);
Joe Perches85ee7a12011-04-23 20:38:19 -070077MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
78 "Pass -1 for infinite timeout");
Viresh Kumard42efe62011-03-22 17:27:25 +053079
Dan Williamse3b9c342013-11-06 16:30:05 -080080static bool noverify;
81module_param(noverify, bool, S_IRUGO | S_IWUSR);
82MODULE_PARM_DESC(noverify, "Disable random data setup and verification");
Andy Shevchenko74b5c072013-03-04 11:09:32 +020083
Dan Williams50137a72013-11-08 12:26:26 -080084static bool verbose;
85module_param(verbose, bool, S_IRUGO | S_IWUSR);
86MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -070087
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +020088/**
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +020089 * struct dmatest_params - test parameters.
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +020090 * @buf_size: size of the memcpy test buffer
91 * @channel: bus ID of the channel to test
92 * @device: bus ID of the DMA Engine to test
93 * @threads_per_chan: number of threads to start per channel
94 * @max_channels: maximum number of channels to use
95 * @iterations: iterations before stopping test
96 * @xor_sources: number of xor source buffers
97 * @pq_sources: number of p+q source buffers
98 * @timeout: transfer timeout in msec, -1 for infinite timeout
99 */
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200100struct dmatest_params {
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200101 unsigned int buf_size;
102 char channel[20];
Guennadi Liakhovetskia85159f2013-12-30 14:58:04 +0100103 char device[32];
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200104 unsigned int threads_per_chan;
105 unsigned int max_channels;
106 unsigned int iterations;
107 unsigned int xor_sources;
108 unsigned int pq_sources;
109 int timeout;
Dan Williamse3b9c342013-11-06 16:30:05 -0800110 bool noverify;
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200111};
112
113/**
114 * struct dmatest_info - test information.
115 * @params: test parameters
Andy Shevchenko851b7e12013-03-04 11:09:30 +0200116 * @lock: access protection to the fields of this structure
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200117 */
Dan Williamsa310d032013-11-06 16:30:01 -0800118static struct dmatest_info {
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200119 /* Test parameters */
120 struct dmatest_params params;
Andy Shevchenko838cc702013-03-04 11:09:28 +0200121
122 /* Internal state */
123 struct list_head channels;
124 unsigned int nr_channels;
Andy Shevchenko851b7e12013-03-04 11:09:30 +0200125 struct mutex lock;
Dan Williamsa310d032013-11-06 16:30:01 -0800126 bool did_init;
127} test_info = {
128 .channels = LIST_HEAD_INIT(test_info.channels),
129 .lock = __MUTEX_INITIALIZER(test_info.lock),
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200130};
131
Dan Williamsa310d032013-11-06 16:30:01 -0800132static int dmatest_run_set(const char *val, const struct kernel_param *kp);
133static int dmatest_run_get(char *val, const struct kernel_param *kp);
Luis R. Rodriguez9c278472015-05-27 11:09:38 +0930134static const struct kernel_param_ops run_ops = {
Dan Williamsa310d032013-11-06 16:30:01 -0800135 .set = dmatest_run_set,
136 .get = dmatest_run_get,
137};
138static bool dmatest_run;
139module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
140MODULE_PARM_DESC(run, "Run the test (default: false)");
141
/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns.  Every byte in a source buffer has bit 7
 * set; every byte in a destination buffer has bit 7 cleared.
 *
 * Bit 6 marks bytes which are to be copied by the DMA engine, and
 * bit 5 marks bytes which are to be overwritten by it.
 *
 * The remaining five bits hold the inverse of a counter which
 * increments by one for each byte address, so shifted or partial
 * transfers show up as counter mismatches during verification.
 */
#define PATTERN_SRC		0x80	/* bit 7 set: source-buffer byte */
#define PATTERN_DST		0x00	/* bit 7 clear: destination byte */
#define PATTERN_COPY		0x40	/* bit 6: inside the copied window */
#define PATTERN_OVERWRITE	0x20	/* bit 5: expected to be overwritten */
#define PATTERN_COUNT_MASK	0x1f	/* low 5 bits: inverted byte counter */
161
162struct dmatest_thread {
163 struct list_head node;
164 struct dmatest_info *info;
165 struct task_struct *task;
166 struct dma_chan *chan;
167 u8 **srcs;
Dave Jiangd6481602016-11-29 13:22:20 -0700168 u8 **usrcs;
Dan Williamsa310d032013-11-06 16:30:01 -0800169 u8 **dsts;
Dave Jiangd6481602016-11-29 13:22:20 -0700170 u8 **udsts;
Dan Williamsa310d032013-11-06 16:30:01 -0800171 enum dma_transaction_type type;
172 bool done;
173};
174
175struct dmatest_chan {
176 struct list_head node;
177 struct dma_chan *chan;
178 struct list_head threads;
179};
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200180
Dan Williams2d88ce72013-11-06 16:30:09 -0800181static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
182static bool wait;
183
184static bool is_threaded_test_run(struct dmatest_info *info)
185{
186 struct dmatest_chan *dtc;
187
188 list_for_each_entry(dtc, &info->channels, node) {
189 struct dmatest_thread *thread;
190
191 list_for_each_entry(thread, &dtc->threads, node) {
192 if (!thread->done)
193 return true;
194 }
195 }
196
197 return false;
198}
199
200static int dmatest_wait_get(char *val, const struct kernel_param *kp)
201{
202 struct dmatest_info *info = &test_info;
203 struct dmatest_params *params = &info->params;
204
205 if (params->iterations)
206 wait_event(thread_wait, !is_threaded_test_run(info));
207 wait = true;
208 return param_get_bool(val, kp);
209}
210
Luis R. Rodriguez9c278472015-05-27 11:09:38 +0930211static const struct kernel_param_ops wait_ops = {
Dan Williams2d88ce72013-11-06 16:30:09 -0800212 .get = dmatest_wait_get,
213 .set = param_set_bool,
214};
215module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
216MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700217
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200218static bool dmatest_match_channel(struct dmatest_params *params,
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200219 struct dma_chan *chan)
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700220{
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200221 if (params->channel[0] == '\0')
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700222 return true;
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200223 return strcmp(dma_chan_name(chan), params->channel) == 0;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700224}
225
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200226static bool dmatest_match_device(struct dmatest_params *params,
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200227 struct dma_device *device)
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700228{
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200229 if (params->device[0] == '\0')
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700230 return true;
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200231 return strcmp(dev_name(device->dev), params->device) == 0;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700232}
233
/* Produce an unsigned long worth of pseudo-random bytes. */
static unsigned long dmatest_random(void)
{
	unsigned long val;

	prandom_bytes(&val, sizeof(val));
	return val;
}
241
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200242static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
243 unsigned int buf_size)
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700244{
245 unsigned int i;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700246 u8 *buf;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700247
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700248 for (; (buf = *bufs); bufs++) {
249 for (i = 0; i < start; i++)
250 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
251 for ( ; i < start + len; i++)
252 buf[i] = PATTERN_SRC | PATTERN_COPY
Joe Perchesc0198942009-06-28 09:26:21 -0700253 | (~i & PATTERN_COUNT_MASK);
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200254 for ( ; i < buf_size; i++)
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700255 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
256 buf++;
257 }
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700258}
259
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200260static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
261 unsigned int buf_size)
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700262{
263 unsigned int i;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700264 u8 *buf;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700265
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700266 for (; (buf = *bufs); bufs++) {
267 for (i = 0; i < start; i++)
268 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
269 for ( ; i < start + len; i++)
270 buf[i] = PATTERN_DST | PATTERN_OVERWRITE
271 | (~i & PATTERN_COUNT_MASK);
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200272 for ( ; i < buf_size; i++)
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700273 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
274 }
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700275}
276
Dan Williams7b610172013-11-06 16:29:57 -0800277static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
278 unsigned int counter, bool is_srcbuf)
279{
280 u8 diff = actual ^ pattern;
281 u8 expected = pattern | (~counter & PATTERN_COUNT_MASK);
282 const char *thread_name = current->comm;
283
284 if (is_srcbuf)
285 pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
286 thread_name, index, expected, actual);
287 else if ((pattern & PATTERN_COPY)
288 && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
289 pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
290 thread_name, index, expected, actual);
291 else if (diff & PATTERN_SRC)
292 pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
293 thread_name, index, expected, actual);
294 else
295 pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
296 thread_name, index, expected, actual);
297}
298
299static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
300 unsigned int end, unsigned int counter, u8 pattern,
301 bool is_srcbuf)
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700302{
303 unsigned int i;
304 unsigned int error_count = 0;
305 u8 actual;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700306 u8 expected;
307 u8 *buf;
308 unsigned int counter_orig = counter;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700309
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700310 for (; (buf = *bufs); bufs++) {
311 counter = counter_orig;
312 for (i = start; i < end; i++) {
313 actual = buf[i];
314 expected = pattern | (~counter & PATTERN_COUNT_MASK);
315 if (actual != expected) {
Dan Williams7b610172013-11-06 16:29:57 -0800316 if (error_count < MAX_ERROR_COUNT)
317 dmatest_mismatch(actual, pattern, i,
318 counter, is_srcbuf);
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700319 error_count++;
320 }
321 counter++;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700322 }
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700323 }
324
Andy Shevchenko74b5c072013-03-04 11:09:32 +0200325 if (error_count > MAX_ERROR_COUNT)
Dan Williams7b610172013-11-06 16:29:57 -0800326 pr_warn("%s: %u errors suppressed\n",
Andy Shevchenko74b5c072013-03-04 11:09:32 +0200327 current->comm, error_count - MAX_ERROR_COUNT);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700328
329 return error_count;
330}
331
Tejun Heoadfa5432011-11-23 09:28:16 -0800332/* poor man's completion - we want to use wait_event_freezable() on it */
333struct dmatest_done {
334 bool done;
335 wait_queue_head_t *wait;
336};
337
338static void dmatest_callback(void *arg)
Dan Williamse44e0aa2009-03-25 09:13:25 -0700339{
Tejun Heoadfa5432011-11-23 09:28:16 -0800340 struct dmatest_done *done = arg;
341
342 done->done = true;
343 wake_up_all(done->wait);
Dan Williamse44e0aa2009-03-25 09:13:25 -0700344}
345
/*
 * Return the smaller of @x and @y, rounded down to the nearest odd
 * number (an even minimum yields minimum - 1).
 */
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int smaller = (x < y) ? x : y;

	return (smaller & 1) ? smaller : smaller - 1;
}
352
Dan Williams872f05c2013-11-06 16:29:58 -0800353static void result(const char *err, unsigned int n, unsigned int src_off,
354 unsigned int dst_off, unsigned int len, unsigned long data)
Andy Shevchenkod86b2f22013-03-04 11:09:34 +0200355{
Jerome Blin2acec152014-03-04 10:38:55 +0100356 pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
Dan Williams872f05c2013-11-06 16:29:58 -0800357 current->comm, n, err, src_off, dst_off, len, data);
Andy Shevchenkod86b2f22013-03-04 11:09:34 +0200358}
359
Dan Williams872f05c2013-11-06 16:29:58 -0800360static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
361 unsigned int dst_off, unsigned int len,
362 unsigned long data)
Andy Shevchenko95019c82013-03-04 11:09:33 +0200363{
Jerome Blin2acec152014-03-04 10:38:55 +0100364 pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
Andy Shevchenkoa835bb82014-10-22 16:16:42 +0300365 current->comm, n, err, src_off, dst_off, len, data);
Andy Shevchenko95019c82013-03-04 11:09:33 +0200366}
367
/*
 * Report a "success"-verbosity result: pr_info when the verbose module
 * parameter is set, pr_debug otherwise.
 */
#define verbose_result(err, n, src_off, dst_off, len, data) ({		\
	if (!verbose)							\
		dbg_result(err, n, src_off, dst_off, len, data);	\
	else								\
		result(err, n, src_off, dst_off, len, data);		\
})
374
Dan Williams86727442013-11-06 16:30:07 -0800375static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
Andy Shevchenko95019c82013-03-04 11:09:33 +0200376{
Dan Williams86727442013-11-06 16:30:07 -0800377 unsigned long long per_sec = 1000000;
Andy Shevchenko95019c82013-03-04 11:09:33 +0200378
Dan Williams86727442013-11-06 16:30:07 -0800379 if (runtime <= 0)
380 return 0;
Andy Shevchenko95019c82013-03-04 11:09:33 +0200381
Dan Williams86727442013-11-06 16:30:07 -0800382 /* drop precision until runtime is 32-bits */
383 while (runtime > UINT_MAX) {
384 runtime >>= 1;
385 per_sec <<= 1;
386 }
Andy Shevchenko95019c82013-03-04 11:09:33 +0200387
Dan Williams86727442013-11-06 16:30:07 -0800388 per_sec *= val;
389 do_div(per_sec, runtime);
390 return per_sec;
Andy Shevchenko95019c82013-03-04 11:09:33 +0200391}
392
Dan Williams86727442013-11-06 16:30:07 -0800393static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
Andy Shevchenkod86b2f22013-03-04 11:09:34 +0200394{
Dan Williams86727442013-11-06 16:30:07 -0800395 return dmatest_persec(runtime, len >> 10);
Andy Shevchenko95019c82013-03-04 11:09:33 +0200396}
397
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700398/*
399 * This function repeatedly tests DMA transfers of various lengths and
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700400 * offsets for a given operation type until it is told to exit by
401 * kthread_stop(). There may be multiple threads running this function
402 * in parallel for a single channel, and there may be multiple channels
403 * being tested in parallel.
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700404 *
405 * Before each test, the source and destination buffer is initialized
406 * with a known pattern. This pattern is different depending on
407 * whether it's in an area which is supposed to be copied or
408 * overwritten, and different in the source and destination buffers.
409 * So if the DMA engine doesn't copy exactly what we tell it to copy,
410 * we'll notice.
411 */
412static int dmatest_func(void *data)
413{
Tejun Heoadfa5432011-11-23 09:28:16 -0800414 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700415 struct dmatest_thread *thread = data;
Tejun Heoadfa5432011-11-23 09:28:16 -0800416 struct dmatest_done done = { .wait = &done_wait };
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200417 struct dmatest_info *info;
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200418 struct dmatest_params *params;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700419 struct dma_chan *chan;
Akinobu Mita8be9e32b2012-10-28 00:49:32 +0900420 struct dma_device *dev;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700421 unsigned int error_count;
422 unsigned int failed_tests = 0;
423 unsigned int total_tests = 0;
424 dma_cookie_t cookie;
425 enum dma_status status;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700426 enum dma_ctrl_flags flags;
Andy Shevchenko945b5af2013-03-04 11:09:26 +0200427 u8 *pq_coefs = NULL;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700428 int ret;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700429 int src_cnt;
430 int dst_cnt;
431 int i;
Sinan Kayae9405ef2016-09-01 10:02:55 -0400432 ktime_t ktime, start, diff;
Thomas Gleixner8b0e1952016-12-25 12:30:41 +0100433 ktime_t filltime = 0;
434 ktime_t comparetime = 0;
Dan Williams86727442013-11-06 16:30:07 -0800435 s64 runtime = 0;
436 unsigned long long total_len = 0;
Dave Jiangd6481602016-11-29 13:22:20 -0700437 u8 align = 0;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700438
Tejun Heoadfa5432011-11-23 09:28:16 -0800439 set_freezable();
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700440
441 ret = -ENOMEM;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700442
443 smp_rmb();
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200444 info = thread->info;
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200445 params = &info->params;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700446 chan = thread->chan;
Akinobu Mita8be9e32b2012-10-28 00:49:32 +0900447 dev = chan->device;
Dave Jiangd6481602016-11-29 13:22:20 -0700448 if (thread->type == DMA_MEMCPY) {
449 align = dev->copy_align;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700450 src_cnt = dst_cnt = 1;
Dave Jiangd6481602016-11-29 13:22:20 -0700451 } else if (thread->type == DMA_SG) {
452 align = dev->copy_align;
Kedareswara rao Appanaa0d4cb42016-06-09 21:10:14 +0530453 src_cnt = dst_cnt = sg_buffers;
Dave Jiangd6481602016-11-29 13:22:20 -0700454 } else if (thread->type == DMA_XOR) {
Akinobu Mita8be9e32b2012-10-28 00:49:32 +0900455 /* force odd to ensure dst = src */
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200456 src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700457 dst_cnt = 1;
Dave Jiangd6481602016-11-29 13:22:20 -0700458 align = dev->xor_align;
Dan Williams58691d62009-08-29 19:09:27 -0700459 } else if (thread->type == DMA_PQ) {
Akinobu Mita8be9e32b2012-10-28 00:49:32 +0900460 /* force odd to ensure dst = src */
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200461 src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
Dan Williams58691d62009-08-29 19:09:27 -0700462 dst_cnt = 2;
Dave Jiangd6481602016-11-29 13:22:20 -0700463 align = dev->pq_align;
Andy Shevchenko945b5af2013-03-04 11:09:26 +0200464
Dave Jiang31d18252016-11-29 13:22:01 -0700465 pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
Andy Shevchenko945b5af2013-03-04 11:09:26 +0200466 if (!pq_coefs)
467 goto err_thread_type;
468
Anatolij Gustschin94de6482010-02-15 22:35:23 +0100469 for (i = 0; i < src_cnt; i++)
Dan Williams58691d62009-08-29 19:09:27 -0700470 pq_coefs[i] = 1;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700471 } else
Andy Shevchenko945b5af2013-03-04 11:09:26 +0200472 goto err_thread_type;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700473
Dave Jiang31d18252016-11-29 13:22:01 -0700474 thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700475 if (!thread->srcs)
476 goto err_srcs;
Dave Jiangd6481602016-11-29 13:22:20 -0700477
478 thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL);
479 if (!thread->usrcs)
480 goto err_usrcs;
481
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700482 for (i = 0; i < src_cnt; i++) {
Dave Jiangd6481602016-11-29 13:22:20 -0700483 thread->usrcs[i] = kmalloc(params->buf_size + align,
484 GFP_KERNEL);
485 if (!thread->usrcs[i])
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700486 goto err_srcbuf;
Dave Jiangd6481602016-11-29 13:22:20 -0700487
488 /* align srcs to alignment restriction */
489 if (align)
490 thread->srcs[i] = PTR_ALIGN(thread->usrcs[i], align);
491 else
492 thread->srcs[i] = thread->usrcs[i];
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700493 }
494 thread->srcs[i] = NULL;
495
Dave Jiang31d18252016-11-29 13:22:01 -0700496 thread->dsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700497 if (!thread->dsts)
498 goto err_dsts;
Dave Jiangd6481602016-11-29 13:22:20 -0700499
500 thread->udsts = kcalloc(dst_cnt + 1, sizeof(u8 *), GFP_KERNEL);
501 if (!thread->udsts)
502 goto err_udsts;
503
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700504 for (i = 0; i < dst_cnt; i++) {
Dave Jiangd6481602016-11-29 13:22:20 -0700505 thread->udsts[i] = kmalloc(params->buf_size + align,
506 GFP_KERNEL);
507 if (!thread->udsts[i])
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700508 goto err_dstbuf;
Dave Jiangd6481602016-11-29 13:22:20 -0700509
510 /* align dsts to alignment restriction */
511 if (align)
512 thread->dsts[i] = PTR_ALIGN(thread->udsts[i], align);
513 else
514 thread->dsts[i] = thread->udsts[i];
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700515 }
516 thread->dsts[i] = NULL;
517
Dan Williamse44e0aa2009-03-25 09:13:25 -0700518 set_user_nice(current, 10);
519
Ira Snyderb203bd32011-03-03 07:54:53 +0000520 /*
Bartlomiej Zolnierkiewiczd1cab342013-10-18 19:35:21 +0200521 * src and dst buffers are freed by ourselves below
Ira Snyderb203bd32011-03-03 07:54:53 +0000522 */
Bartlomiej Zolnierkiewicz0776ae72013-10-18 19:35:33 +0200523 flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700524
Dan Williams86727442013-11-06 16:30:07 -0800525 ktime = ktime_get();
Nicolas Ferre0a2ff57d2009-07-03 19:26:51 +0200526 while (!kthread_should_stop()
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200527 && !(params->iterations && total_tests >= params->iterations)) {
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700528 struct dma_async_tx_descriptor *tx = NULL;
Dan Williams4076e752013-11-06 16:30:10 -0800529 struct dmaengine_unmap_data *um;
530 dma_addr_t srcs[src_cnt];
531 dma_addr_t *dsts;
Andy Shevchenkoede23a52014-10-22 16:16:43 +0300532 unsigned int src_off, dst_off, len;
Kedareswara rao Appanaa0d4cb42016-06-09 21:10:14 +0530533 struct scatterlist tx_sg[src_cnt];
534 struct scatterlist rx_sg[src_cnt];
Atsushi Nemotod86be862009-01-13 09:22:20 -0700535
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700536 total_tests++;
537
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200538 if (1 << align > params->buf_size) {
Guennadi Liakhovetskicfe4f272009-12-04 19:44:48 +0100539 pr_err("%u-byte buffer too small for %d-byte alignment\n",
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200540 params->buf_size, 1 << align);
Guennadi Liakhovetskicfe4f272009-12-04 19:44:48 +0100541 break;
542 }
543
Andy Shevchenkoede23a52014-10-22 16:16:43 +0300544 if (params->noverify)
Dan Williamse3b9c342013-11-06 16:30:05 -0800545 len = params->buf_size;
Andy Shevchenkoede23a52014-10-22 16:16:43 +0300546 else
547 len = dmatest_random() % params->buf_size + 1;
548
549 len = (len >> align) << align;
550 if (!len)
551 len = 1 << align;
552
553 total_len += len;
554
555 if (params->noverify) {
Dan Williamse3b9c342013-11-06 16:30:05 -0800556 src_off = 0;
557 dst_off = 0;
558 } else {
Sinan Kayae9405ef2016-09-01 10:02:55 -0400559 start = ktime_get();
Dan Williamse3b9c342013-11-06 16:30:05 -0800560 src_off = dmatest_random() % (params->buf_size - len + 1);
561 dst_off = dmatest_random() % (params->buf_size - len + 1);
562
563 src_off = (src_off >> align) << align;
564 dst_off = (dst_off >> align) << align;
565
566 dmatest_init_srcs(thread->srcs, src_off, len,
567 params->buf_size);
568 dmatest_init_dsts(thread->dsts, dst_off, len,
569 params->buf_size);
Sinan Kayae9405ef2016-09-01 10:02:55 -0400570
571 diff = ktime_sub(ktime_get(), start);
572 filltime = ktime_add(filltime, diff);
Dan Williamse3b9c342013-11-06 16:30:05 -0800573 }
574
Dave Jiang31d18252016-11-29 13:22:01 -0700575 um = dmaengine_get_unmap_data(dev->dev, src_cnt + dst_cnt,
Dan Williams4076e752013-11-06 16:30:10 -0800576 GFP_KERNEL);
577 if (!um) {
578 failed_tests++;
579 result("unmap data NULL", total_tests,
580 src_off, dst_off, len, ret);
581 continue;
582 }
Dan Williams83544ae2009-09-08 17:42:53 -0700583
Dan Williams4076e752013-11-06 16:30:10 -0800584 um->len = params->buf_size;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700585 for (i = 0; i < src_cnt; i++) {
Dan Williams745c00d2013-12-09 11:16:01 -0800586 void *buf = thread->srcs[i];
Dan Williams4076e752013-11-06 16:30:10 -0800587 struct page *pg = virt_to_page(buf);
Dan Williams745c00d2013-12-09 11:16:01 -0800588 unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700589
Dan Williams4076e752013-11-06 16:30:10 -0800590 um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
591 um->len, DMA_TO_DEVICE);
592 srcs[i] = um->addr[i] + src_off;
593 ret = dma_mapping_error(dev->dev, um->addr[i]);
Andy Shevchenkoafde3be2012-12-17 15:59:53 -0800594 if (ret) {
Dan Williams4076e752013-11-06 16:30:10 -0800595 dmaengine_unmap_put(um);
Dan Williams872f05c2013-11-06 16:29:58 -0800596 result("src mapping error", total_tests,
597 src_off, dst_off, len, ret);
Andy Shevchenkoafde3be2012-12-17 15:59:53 -0800598 failed_tests++;
599 continue;
600 }
Dan Williams4076e752013-11-06 16:30:10 -0800601 um->to_cnt++;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700602 }
Atsushi Nemotod86be862009-01-13 09:22:20 -0700603 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
Dan Williams4076e752013-11-06 16:30:10 -0800604 dsts = &um->addr[src_cnt];
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700605 for (i = 0; i < dst_cnt; i++) {
Dan Williams745c00d2013-12-09 11:16:01 -0800606 void *buf = thread->dsts[i];
Dan Williams4076e752013-11-06 16:30:10 -0800607 struct page *pg = virt_to_page(buf);
Dan Williams745c00d2013-12-09 11:16:01 -0800608 unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
Dan Williams4076e752013-11-06 16:30:10 -0800609
610 dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
611 DMA_BIDIRECTIONAL);
612 ret = dma_mapping_error(dev->dev, dsts[i]);
Andy Shevchenkoafde3be2012-12-17 15:59:53 -0800613 if (ret) {
Dan Williams4076e752013-11-06 16:30:10 -0800614 dmaengine_unmap_put(um);
Dan Williams872f05c2013-11-06 16:29:58 -0800615 result("dst mapping error", total_tests,
616 src_off, dst_off, len, ret);
Andy Shevchenkoafde3be2012-12-17 15:59:53 -0800617 failed_tests++;
618 continue;
619 }
Dan Williams4076e752013-11-06 16:30:10 -0800620 um->bidi_cnt++;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700621 }
Atsushi Nemotod86be862009-01-13 09:22:20 -0700622
Kedareswara rao Appanaa0d4cb42016-06-09 21:10:14 +0530623 sg_init_table(tx_sg, src_cnt);
624 sg_init_table(rx_sg, src_cnt);
625 for (i = 0; i < src_cnt; i++) {
626 sg_dma_address(&rx_sg[i]) = srcs[i];
627 sg_dma_address(&tx_sg[i]) = dsts[i] + dst_off;
628 sg_dma_len(&tx_sg[i]) = len;
629 sg_dma_len(&rx_sg[i]) = len;
630 }
631
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700632 if (thread->type == DMA_MEMCPY)
633 tx = dev->device_prep_dma_memcpy(chan,
Dan Williams4076e752013-11-06 16:30:10 -0800634 dsts[0] + dst_off,
635 srcs[0], len, flags);
Kedareswara rao Appanaa0d4cb42016-06-09 21:10:14 +0530636 else if (thread->type == DMA_SG)
637 tx = dev->device_prep_dma_sg(chan, tx_sg, src_cnt,
638 rx_sg, src_cnt, flags);
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700639 else if (thread->type == DMA_XOR)
640 tx = dev->device_prep_dma_xor(chan,
Dan Williams4076e752013-11-06 16:30:10 -0800641 dsts[0] + dst_off,
642 srcs, src_cnt,
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700643 len, flags);
Dan Williams58691d62009-08-29 19:09:27 -0700644 else if (thread->type == DMA_PQ) {
645 dma_addr_t dma_pq[dst_cnt];
646
647 for (i = 0; i < dst_cnt; i++)
Dan Williams4076e752013-11-06 16:30:10 -0800648 dma_pq[i] = dsts[i] + dst_off;
649 tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
Anatolij Gustschin94de6482010-02-15 22:35:23 +0100650 src_cnt, pq_coefs,
Dan Williams58691d62009-08-29 19:09:27 -0700651 len, flags);
652 }
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700653
Atsushi Nemotod86be862009-01-13 09:22:20 -0700654 if (!tx) {
Dan Williams4076e752013-11-06 16:30:10 -0800655 dmaengine_unmap_put(um);
Dan Williams872f05c2013-11-06 16:29:58 -0800656 result("prep error", total_tests, src_off,
657 dst_off, len, ret);
Atsushi Nemotod86be862009-01-13 09:22:20 -0700658 msleep(100);
659 failed_tests++;
660 continue;
661 }
Dan Williamse44e0aa2009-03-25 09:13:25 -0700662
Tejun Heoadfa5432011-11-23 09:28:16 -0800663 done.done = false;
Dan Williamse44e0aa2009-03-25 09:13:25 -0700664 tx->callback = dmatest_callback;
Tejun Heoadfa5432011-11-23 09:28:16 -0800665 tx->callback_param = &done;
Atsushi Nemotod86be862009-01-13 09:22:20 -0700666 cookie = tx->tx_submit(tx);
667
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700668 if (dma_submit_error(cookie)) {
Dan Williams4076e752013-11-06 16:30:10 -0800669 dmaengine_unmap_put(um);
Dan Williams872f05c2013-11-06 16:29:58 -0800670 result("submit error", total_tests, src_off,
671 dst_off, len, ret);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700672 msleep(100);
673 failed_tests++;
674 continue;
675 }
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700676 dma_async_issue_pending(chan);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700677
Andy Shevchenkobcc567e2013-05-23 14:29:53 +0300678 wait_event_freezable_timeout(done_wait, done.done,
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200679 msecs_to_jiffies(params->timeout));
Guennadi Liakhovetski981ed702011-08-18 16:50:51 +0200680
Dan Williamse44e0aa2009-03-25 09:13:25 -0700681 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700682
Tejun Heoadfa5432011-11-23 09:28:16 -0800683 if (!done.done) {
684 /*
685 * We're leaving the timed out dma operation with
686 * dangling pointer to done_wait. To make this
687 * correct, we'll need to allocate wait_done for
688 * each test iteration and perform "who's gonna
689 * free it this time?" dancing. For now, just
690 * leave it dangling.
691 */
Dan Williams4076e752013-11-06 16:30:10 -0800692 dmaengine_unmap_put(um);
Dan Williams872f05c2013-11-06 16:29:58 -0800693 result("test timed out", total_tests, src_off, dst_off,
694 len, 0);
Dan Williamse44e0aa2009-03-25 09:13:25 -0700695 failed_tests++;
696 continue;
Vinod Koul19e9f992013-10-16 13:37:27 +0530697 } else if (status != DMA_COMPLETE) {
Dan Williams4076e752013-11-06 16:30:10 -0800698 dmaengine_unmap_put(um);
Dan Williams872f05c2013-11-06 16:29:58 -0800699 result(status == DMA_ERROR ?
700 "completion error status" :
701 "completion busy status", total_tests, src_off,
702 dst_off, len, ret);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700703 failed_tests++;
704 continue;
705 }
Dan Williamse44e0aa2009-03-25 09:13:25 -0700706
Dan Williams4076e752013-11-06 16:30:10 -0800707 dmaengine_unmap_put(um);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700708
Dan Williamse3b9c342013-11-06 16:30:05 -0800709 if (params->noverify) {
Dan Williams50137a72013-11-08 12:26:26 -0800710 verbose_result("test passed", total_tests, src_off,
711 dst_off, len, 0);
Dan Williamse3b9c342013-11-06 16:30:05 -0800712 continue;
713 }
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700714
Sinan Kayae9405ef2016-09-01 10:02:55 -0400715 start = ktime_get();
Dan Williams872f05c2013-11-06 16:29:58 -0800716 pr_debug("%s: verifying source buffer...\n", current->comm);
Dan Williamse3b9c342013-11-06 16:30:05 -0800717 error_count = dmatest_verify(thread->srcs, 0, src_off,
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700718 0, PATTERN_SRC, true);
Dan Williams7b610172013-11-06 16:29:57 -0800719 error_count += dmatest_verify(thread->srcs, src_off,
720 src_off + len, src_off,
721 PATTERN_SRC | PATTERN_COPY, true);
722 error_count += dmatest_verify(thread->srcs, src_off + len,
723 params->buf_size, src_off + len,
724 PATTERN_SRC, true);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700725
Dan Williams872f05c2013-11-06 16:29:58 -0800726 pr_debug("%s: verifying dest buffer...\n", current->comm);
Dan Williams7b610172013-11-06 16:29:57 -0800727 error_count += dmatest_verify(thread->dsts, 0, dst_off,
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700728 0, PATTERN_DST, false);
Dan Williams7b610172013-11-06 16:29:57 -0800729 error_count += dmatest_verify(thread->dsts, dst_off,
730 dst_off + len, src_off,
731 PATTERN_SRC | PATTERN_COPY, false);
732 error_count += dmatest_verify(thread->dsts, dst_off + len,
733 params->buf_size, dst_off + len,
734 PATTERN_DST, false);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700735
Sinan Kayae9405ef2016-09-01 10:02:55 -0400736 diff = ktime_sub(ktime_get(), start);
737 comparetime = ktime_add(comparetime, diff);
738
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700739 if (error_count) {
Dan Williams872f05c2013-11-06 16:29:58 -0800740 result("data error", total_tests, src_off, dst_off,
741 len, error_count);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700742 failed_tests++;
743 } else {
Dan Williams50137a72013-11-08 12:26:26 -0800744 verbose_result("test passed", total_tests, src_off,
745 dst_off, len, 0);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700746 }
747 }
Sinan Kayae9405ef2016-09-01 10:02:55 -0400748 ktime = ktime_sub(ktime_get(), ktime);
749 ktime = ktime_sub(ktime, comparetime);
750 ktime = ktime_sub(ktime, filltime);
751 runtime = ktime_to_us(ktime);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700752
753 ret = 0;
Andy Shevchenko8e1f50d2014-08-22 15:19:44 +0300754err_dstbuf:
Dave Jiangd6481602016-11-29 13:22:20 -0700755 for (i = 0; thread->udsts[i]; i++)
756 kfree(thread->udsts[i]);
757 kfree(thread->udsts);
758err_udsts:
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700759 kfree(thread->dsts);
760err_dsts:
Andy Shevchenko8e1f50d2014-08-22 15:19:44 +0300761err_srcbuf:
Dave Jiangd6481602016-11-29 13:22:20 -0700762 for (i = 0; thread->usrcs[i]; i++)
763 kfree(thread->usrcs[i]);
764 kfree(thread->usrcs);
765err_usrcs:
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700766 kfree(thread->srcs);
767err_srcs:
Andy Shevchenko945b5af2013-03-04 11:09:26 +0200768 kfree(pq_coefs);
769err_thread_type:
Dan Williams86727442013-11-06 16:30:07 -0800770 pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n",
771 current->comm, total_tests, failed_tests,
772 dmatest_persec(runtime, total_tests),
773 dmatest_KBs(runtime, total_len), ret);
Nicolas Ferre0a2ff57d2009-07-03 19:26:51 +0200774
Viresh Kumar9704efa2011-07-29 16:21:57 +0530775 /* terminate all transfers on specified channels */
Shiraz Hashim5e034f72012-11-09 15:26:29 +0000776 if (ret)
777 dmaengine_terminate_all(chan);
778
Andy Shevchenko3e5ccd82013-03-04 11:09:31 +0200779 thread->done = true;
Dan Williams2d88ce72013-11-06 16:30:09 -0800780 wake_up(&thread_wait);
Nicolas Ferre0a2ff57d2009-07-03 19:26:51 +0200781
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700782 return ret;
783}
784
785static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
786{
787 struct dmatest_thread *thread;
788 struct dmatest_thread *_thread;
789 int ret;
790
791 list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
792 ret = kthread_stop(thread->task);
Dan Williams0adff802013-11-06 16:30:00 -0800793 pr_debug("thread %s exited with status %d\n",
794 thread->task->comm, ret);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700795 list_del(&thread->node);
Dan Williams2d88ce72013-11-06 16:30:09 -0800796 put_task_struct(thread->task);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700797 kfree(thread);
798 }
Viresh Kumar9704efa2011-07-29 16:21:57 +0530799
800 /* terminate all transfers on specified channels */
Jon Mason944ea4d2012-11-11 23:03:20 +0000801 dmaengine_terminate_all(dtc->chan);
Viresh Kumar9704efa2011-07-29 16:21:57 +0530802
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700803 kfree(dtc);
804}
805
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200806static int dmatest_add_threads(struct dmatest_info *info,
807 struct dmatest_chan *dtc, enum dma_transaction_type type)
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700808{
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200809 struct dmatest_params *params = &info->params;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700810 struct dmatest_thread *thread;
811 struct dma_chan *chan = dtc->chan;
812 char *op;
813 unsigned int i;
814
815 if (type == DMA_MEMCPY)
816 op = "copy";
Kedareswara rao Appanaa0d4cb42016-06-09 21:10:14 +0530817 else if (type == DMA_SG)
818 op = "sg";
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700819 else if (type == DMA_XOR)
820 op = "xor";
Dan Williams58691d62009-08-29 19:09:27 -0700821 else if (type == DMA_PQ)
822 op = "pq";
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700823 else
824 return -EINVAL;
825
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200826 for (i = 0; i < params->threads_per_chan; i++) {
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700827 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
828 if (!thread) {
Dan Williams0adff802013-11-06 16:30:00 -0800829 pr_warn("No memory for %s-%s%u\n",
830 dma_chan_name(chan), op, i);
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700831 break;
832 }
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200833 thread->info = info;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700834 thread->chan = dtc->chan;
835 thread->type = type;
836 smp_wmb();
Dan Williams2d88ce72013-11-06 16:30:09 -0800837 thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700838 dma_chan_name(chan), op, i);
839 if (IS_ERR(thread->task)) {
Dan Williams2d88ce72013-11-06 16:30:09 -0800840 pr_warn("Failed to create thread %s-%s%u\n",
Dan Williams0adff802013-11-06 16:30:00 -0800841 dma_chan_name(chan), op, i);
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700842 kfree(thread);
843 break;
844 }
845
846 /* srcbuf and dstbuf are allocated by the thread itself */
Dan Williams2d88ce72013-11-06 16:30:09 -0800847 get_task_struct(thread->task);
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700848 list_add_tail(&thread->node, &dtc->threads);
Dan Williams2d88ce72013-11-06 16:30:09 -0800849 wake_up_process(thread->task);
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700850 }
851
852 return i;
853}
854
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200855static int dmatest_add_channel(struct dmatest_info *info,
856 struct dma_chan *chan)
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700857{
858 struct dmatest_chan *dtc;
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700859 struct dma_device *dma_dev = chan->device;
860 unsigned int thread_count = 0;
Kulikov Vasiliyb9033e62010-07-17 19:19:48 +0400861 int cnt;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700862
Andrew Morton6fdb8bd2008-09-19 04:16:23 -0700863 dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700864 if (!dtc) {
Dan Williams0adff802013-11-06 16:30:00 -0800865 pr_warn("No memory for %s\n", dma_chan_name(chan));
Dan Williams33df8ca2009-01-06 11:38:15 -0700866 return -ENOMEM;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700867 }
868
869 dtc->chan = chan;
870 INIT_LIST_HEAD(&dtc->threads);
871
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700872 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
Kedareswara rao Appanaa0d4cb42016-06-09 21:10:14 +0530873 if (dmatest == 0) {
874 cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
875 thread_count += cnt > 0 ? cnt : 0;
876 }
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700877 }
Kedareswara rao Appanaa0d4cb42016-06-09 21:10:14 +0530878
879 if (dma_has_cap(DMA_SG, dma_dev->cap_mask)) {
880 if (dmatest == 1) {
881 cnt = dmatest_add_threads(info, dtc, DMA_SG);
882 thread_count += cnt > 0 ? cnt : 0;
883 }
884 }
885
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700886 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200887 cnt = dmatest_add_threads(info, dtc, DMA_XOR);
Nicolas Ferref1aef8b2009-07-06 18:19:44 +0200888 thread_count += cnt > 0 ? cnt : 0;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700889 }
Dan Williams58691d62009-08-29 19:09:27 -0700890 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200891 cnt = dmatest_add_threads(info, dtc, DMA_PQ);
Dr. David Alan Gilbertd07a74a2011-08-25 16:13:55 -0700892 thread_count += cnt > 0 ? cnt : 0;
Dan Williams58691d62009-08-29 19:09:27 -0700893 }
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700894
Dan Williams0adff802013-11-06 16:30:00 -0800895 pr_info("Started %u threads using %s\n",
Dan Williamsb54d5cb2009-03-25 09:13:25 -0700896 thread_count, dma_chan_name(chan));
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700897
Andy Shevchenko838cc702013-03-04 11:09:28 +0200898 list_add_tail(&dtc->node, &info->channels);
899 info->nr_channels++;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700900
Dan Williams33df8ca2009-01-06 11:38:15 -0700901 return 0;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700902}
903
Dan Williams7dd60252009-01-06 11:38:19 -0700904static bool filter(struct dma_chan *chan, void *param)
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700905{
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200906 struct dmatest_params *params = param;
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200907
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200908 if (!dmatest_match_channel(params, chan) ||
909 !dmatest_match_device(params, chan->device))
Dan Williams7dd60252009-01-06 11:38:19 -0700910 return false;
Dan Williams33df8ca2009-01-06 11:38:15 -0700911 else
Dan Williams7dd60252009-01-06 11:38:19 -0700912 return true;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700913}
914
Dan Williamsa9e55492013-11-06 16:30:02 -0800915static void request_channels(struct dmatest_info *info,
916 enum dma_transaction_type type)
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700917{
Dan Williams33df8ca2009-01-06 11:38:15 -0700918 dma_cap_mask_t mask;
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700919
Dan Williams33df8ca2009-01-06 11:38:15 -0700920 dma_cap_zero(mask);
Dan Williamsa9e55492013-11-06 16:30:02 -0800921 dma_cap_set(type, mask);
Dan Williams33df8ca2009-01-06 11:38:15 -0700922 for (;;) {
Dan Williamsa9e55492013-11-06 16:30:02 -0800923 struct dmatest_params *params = &info->params;
924 struct dma_chan *chan;
925
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200926 chan = dma_request_channel(mask, filter, params);
Dan Williams33df8ca2009-01-06 11:38:15 -0700927 if (chan) {
Dan Williamsa9e55492013-11-06 16:30:02 -0800928 if (dmatest_add_channel(info, chan)) {
Dan Williams33df8ca2009-01-06 11:38:15 -0700929 dma_release_channel(chan);
930 break; /* add_channel failed, punt */
931 }
932 } else
933 break; /* no more channels available */
Andy Shevchenko15b8a8e2013-03-04 11:09:29 +0200934 if (params->max_channels &&
935 info->nr_channels >= params->max_channels)
Dan Williams33df8ca2009-01-06 11:38:15 -0700936 break; /* we have all we need */
937 }
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700938}
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700939
/*
 * Snapshot the writable module parameters into info->params and then
 * acquire channels / start worker threads for each transaction type.
 * Copying the parameters here makes a running test immune to further
 * sysfs writes to the module parameters.  Caller holds info->lock.
 */
static void run_threaded_test(struct dmatest_info *info)
{
	struct dmatest_params *params = &info->params;

	/* Copy test parameters */
	params->buf_size = test_buf_size;
	/* strim() drops stray whitespace a user may have echoed in */
	strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
	strlcpy(params->device, strim(test_device), sizeof(params->device));
	params->threads_per_chan = threads_per_chan;
	params->max_channels = max_channels;
	params->iterations = iterations;
	params->xor_sources = xor_sources;
	params->pq_sources = pq_sources;
	params->timeout = timeout;
	params->noverify = noverify;

	request_channels(info, DMA_MEMCPY);
	request_channels(info, DMA_XOR);
	request_channels(info, DMA_SG);
	request_channels(info, DMA_PQ);
}
961
Dan Williamsa310d032013-11-06 16:30:01 -0800962static void stop_threaded_test(struct dmatest_info *info)
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700963{
964 struct dmatest_chan *dtc, *_dtc;
965 struct dma_chan *chan;
966
967 list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
968 list_del(&dtc->node);
969 chan = dtc->chan;
970 dmatest_cleanup_channel(dtc);
Dan Williams0adff802013-11-06 16:30:00 -0800971 pr_debug("dropped channel %s\n", dma_chan_name(chan));
Andy Shevchenkoe03e93a2013-03-04 11:09:27 +0200972 dma_release_channel(chan);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -0700973 }
Dan Williams33df8ca2009-01-06 11:38:15 -0700974
Dan Williams7cbd4872009-03-04 16:06:03 -0700975 info->nr_channels = 0;
Dan Williams33df8ca2009-01-06 11:38:15 -0700976}
Andy Shevchenko838cc702013-03-04 11:09:28 +0200977
Dan Williamsa9e55492013-11-06 16:30:02 -0800978static void restart_threaded_test(struct dmatest_info *info, bool run)
Dan Williams7cbd4872009-03-04 16:06:03 -0700979{
Dan Williamsa310d032013-11-06 16:30:01 -0800980 /* we might be called early to set run=, defer running until all
981 * parameters have been evaluated
982 */
983 if (!info->did_init)
Dan Williamsa9e55492013-11-06 16:30:02 -0800984 return;
Andy Shevchenko851b7e12013-03-04 11:09:30 +0200985
Dan Williamsa310d032013-11-06 16:30:01 -0800986 /* Stop any running test first */
987 stop_threaded_test(info);
Andy Shevchenko851b7e12013-03-04 11:09:30 +0200988
989 /* Run test with new parameters */
Dan Williamsa9e55492013-11-06 16:30:02 -0800990 run_threaded_test(info);
Andy Shevchenkobcc567e2013-05-23 14:29:53 +0300991}
992
Dan Williamsa310d032013-11-06 16:30:01 -0800993static int dmatest_run_get(char *val, const struct kernel_param *kp)
Andy Shevchenkobcc567e2013-05-23 14:29:53 +0300994{
Dan Williamsa310d032013-11-06 16:30:01 -0800995 struct dmatest_info *info = &test_info;
Andy Shevchenko851b7e12013-03-04 11:09:30 +0200996
997 mutex_lock(&info->lock);
Dan Williamsa310d032013-11-06 16:30:01 -0800998 if (is_threaded_test_run(info)) {
999 dmatest_run = true;
Andy Shevchenko3e5ccd82013-03-04 11:09:31 +02001000 } else {
Dan Williamsa310d032013-11-06 16:30:01 -08001001 stop_threaded_test(info);
1002 dmatest_run = false;
Andy Shevchenko3e5ccd82013-03-04 11:09:31 +02001003 }
Dan Williamsa310d032013-11-06 16:30:01 -08001004 mutex_unlock(&info->lock);
1005
1006 return param_get_bool(val, kp);
1007}
1008
1009static int dmatest_run_set(const char *val, const struct kernel_param *kp)
1010{
1011 struct dmatest_info *info = &test_info;
1012 int ret;
1013
1014 mutex_lock(&info->lock);
1015 ret = param_set_bool(val, kp);
1016 if (ret) {
1017 mutex_unlock(&info->lock);
1018 return ret;
1019 }
1020
1021 if (is_threaded_test_run(info))
1022 ret = -EBUSY;
1023 else if (dmatest_run)
Dan Williamsa9e55492013-11-06 16:30:02 -08001024 restart_threaded_test(info, dmatest_run);
Andy Shevchenko3e5ccd82013-03-04 11:09:31 +02001025
Andy Shevchenko851b7e12013-03-04 11:09:30 +02001026 mutex_unlock(&info->lock);
Andy Shevchenko851b7e12013-03-04 11:09:30 +02001027
Dan Williamsa310d032013-11-06 16:30:01 -08001028 return ret;
Andy Shevchenko851b7e12013-03-04 11:09:30 +02001029}
1030
/*
 * Module init: optionally kick off an initial test run (when run= was set
 * on the command line), optionally wait for it to finish, then hand 'run'
 * control over to userspace by setting did_init.  Ordering matters:
 * did_init is set only after the inittime test has been started, so a
 * concurrent restart_threaded_test() stays a no-op until then.
 */
static int __init dmatest_init(void)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (dmatest_run) {
		mutex_lock(&info->lock);
		run_threaded_test(info);
		mutex_unlock(&info->lock);
	}

	/* only block when the run is bounded; an infinite run can't end */
	if (params->iterations && wait)
		wait_event(thread_wait, !is_threaded_test_run(info));

	/* module parameters are stable, inittime tests are started,
	 * let userspace take over 'run' control
	 */
	info->did_init = true;

	return 0;
}
/* when compiled-in wait for drivers to load first */
late_initcall(dmatest_init);
1054
/*
 * Module exit: stop all test threads and release every DMA channel.
 * Taken under info->lock to serialize against the 'run' parameter
 * getter/setter paths.
 */
static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	mutex_lock(&info->lock);
	stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
module_exit(dmatest_exit);
1064
Jean Delvaree05503e2011-05-18 16:49:24 +02001065MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -07001066MODULE_LICENSE("GPL v2");