// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Engine test module
 *
 * Copyright (C) 2007 Atmel Corporation
 * Copyright (C) 2013 Intel Corporation
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

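/*
 * Typical usage (the module parameters are described with their
 * declarations below; see also
 * Documentation/driver-api/dmaengine/dmatest.rst), e.g.:
 *
 *	modprobe dmatest timeout=2000 iterations=5 channel=dma0chan0 run=1
 */
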
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/wait.h>

static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_device[32];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int dmatest;
module_param(dmatest, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dmatest,
		"dmatest 0-memcpy 1-memset (default: 0)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

static int timeout = 3000;
module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass 0xFFFFFFFF (4294967295) for maximum timeout");

static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(noverify, "Disable data verification (default: verify)");

static bool norandom;
module_param(norandom, bool, 0644);
MODULE_PARM_DESC(norandom, "Disable random offset setup (default: random)");

static bool verbose;
module_param(verbose, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");

static int alignment = -1;
module_param(alignment, int, 0644);
MODULE_PARM_DESC(alignment, "Custom data address alignment taken as 2^(alignment) (default: not used (-1))");

static unsigned int transfer_size;
module_param(transfer_size, uint, 0644);
MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))");

/**
 * struct dmatest_params - test parameters.
 * @buf_size: size of the memcpy test buffer
 * @channel: bus ID of the channel to test
 * @device: bus ID of the DMA Engine to test
 * @threads_per_chan: number of threads to start per channel
 * @max_channels: maximum number of channels to use
 * @iterations: iterations before stopping test
 * @xor_sources: number of xor source buffers
 * @pq_sources: number of p+q source buffers
 * @timeout: transfer timeout in msec, 0 - 0xFFFFFFFF (4294967295)
 * @noverify: disable data verification
 * @norandom: disable random offset setup
 * @alignment: custom data address alignment taken as 2^(alignment) (-1: not used)
 * @transfer_size: custom transfer size in bytes (0: not used)
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[32];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	unsigned int	timeout;
	bool		noverify;
	bool		norandom;
	int		alignment;
	unsigned int	transfer_size;
};

/**
 * struct dmatest_info - test information.
 * @params: test parameters
 * @lock: access protection to the fields of this structure
 */
static struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;
	unsigned int		nr_channels;
	struct mutex		lock;
	bool			did_init;
} test_info = {
	.channels = LIST_HEAD_INIT(test_info.channels),
	.lock = __MUTEX_INITIALIZER(test_info.lock),
};

static int dmatest_run_set(const char *val, const struct kernel_param *kp);
static int dmatest_run_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops run_ops = {
	.set = dmatest_run_set,
	.get = dmatest_run_get,
};
static bool dmatest_run;
module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(run, "Run the test (default: false)");

static int dmatest_chan_set(const char *val, const struct kernel_param *kp);
static int dmatest_chan_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops multi_chan_ops = {
	.set = dmatest_chan_set,
	.get = dmatest_chan_get,
};

static char test_channel[20];
static struct kparam_string newchan_kps = {
	.string = test_channel,
	.maxlen = 20,
};
module_param_cb(channel, &multi_chan_ops, &newchan_kps, 0644);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static int dmatest_test_list_get(char *val, const struct kernel_param *kp);
static const struct kernel_param_ops test_list_ops = {
	.get = dmatest_test_list_get,
};
module_param_cb(test_list, &test_list_ops, NULL, 0444);
MODULE_PARM_DESC(test_list, "Print current test list");

/* Maximum number of mismatched bytes in a buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer have bit 7
 * set, all bytes in the destination buffer have bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
#define PATTERN_MEMSET_IDX	0x01

/* Fixed point arithmetic ops */
#define FIXPT_SHIFT		8
#define FIXPNT_MASK		0xFF
#define FIXPT_TO_INT(a)		((a) >> FIXPT_SHIFT)
#define INT_TO_FIXPT(a)		((a) << FIXPT_SHIFT)
#define FIXPT_GET_FRAC(a)	((((a) & FIXPNT_MASK) * 100) >> FIXPT_SHIFT)
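/*
 * Worked example of the fixed-point encoding: with FIXPT_SHIFT == 8, an
 * iops value of 0x280 decodes as FIXPT_TO_INT(0x280) == 2 whole operations
 * per second and FIXPT_GET_FRAC(0x280) == (0x80 * 100) >> 8 == 50
 * hundredths, which the summary line prints as "2.50 iops".
 */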

/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;
	wait_queue_head_t	*wait;
};

struct dmatest_data {
	u8		**raw;
	u8		**aligned;
	unsigned int	cnt;
	unsigned int	off;
};

struct dmatest_thread {
	struct list_head	node;
	struct dmatest_info	*info;
	struct task_struct	*task;
	struct dma_chan		*chan;
	struct dmatest_data	src;
	struct dmatest_data	dst;
	enum dma_transaction_type type;
	wait_queue_head_t	done_wait;
	struct dmatest_done	test_done;
	bool			done;
	bool			pending;
};

struct dmatest_chan {
	struct list_head	node;
	struct dma_chan		*chan;
	struct list_head	threads;
};

static DECLARE_WAIT_QUEUE_HEAD(thread_wait);
static bool wait;

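/* true as long as any test thread on any channel has not yet completed */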
static bool is_threaded_test_run(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (!thread->done)
				return true;
		}
	}

	return false;
}

static bool is_threaded_test_pending(struct dmatest_info *info)
{
	struct dmatest_chan *dtc;

	list_for_each_entry(dtc, &info->channels, node) {
		struct dmatest_thread *thread;

		list_for_each_entry(thread, &dtc->threads, node) {
			if (thread->pending)
				return true;
		}
	}

	return false;
}

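/*
 * Reading the "wait" parameter blocks until all threads are done, but only
 * when a finite iteration count was requested; with infinite iterations the
 * read would never return.
 */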
static int dmatest_wait_get(char *val, const struct kernel_param *kp)
{
	struct dmatest_info *info = &test_info;
	struct dmatest_params *params = &info->params;

	if (params->iterations)
		wait_event(thread_wait, !is_threaded_test_run(info));
	wait = true;
	return param_get_bool(val, kp);
}

static const struct kernel_param_ops wait_ops = {
	.get = dmatest_wait_get,
	.set = param_set_bool,
};
module_param_cb(wait, &wait_ops, &wait, S_IRUGO);
MODULE_PARM_DESC(wait, "Wait for tests to complete (default: false)");

static bool dmatest_match_channel(struct dmatest_params *params,
		struct dma_chan *chan)
{
	if (params->channel[0] == '\0')
		return true;
	return strcmp(dma_chan_name(chan), params->channel) == 0;
}

static bool dmatest_match_device(struct dmatest_params *params,
		struct dma_device *device)
{
	if (params->device[0] == '\0')
		return true;
	return strcmp(dev_name(device->dev), params->device) == 0;
}

static unsigned long dmatest_random(void)
{
	unsigned long buf;

	prandom_bytes(&buf, sizeof(buf));
	return buf;
}

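/*
 * A test byte encodes its buffer role in the high bits and the inverted low
 * bits of its index in the rest; e.g. gen_src_value(2, false) ==
 * PATTERN_SRC | (~2 & PATTERN_COUNT_MASK) == 0x80 | 0x1d == 0x9d. For
 * memset tests every byte uses the same index, PATTERN_MEMSET_IDX.
 */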
static inline u8 gen_inv_idx(u8 index, bool is_memset)
{
	u8 val = is_memset ? PATTERN_MEMSET_IDX : index;

	return ~val & PATTERN_COUNT_MASK;
}

static inline u8 gen_src_value(u8 index, bool is_memset)
{
	return PATTERN_SRC | gen_inv_idx(index, is_memset);
}

static inline u8 gen_dst_value(u8 index, bool is_memset)
{
	return PATTERN_DST | gen_inv_idx(index, is_memset);
}

static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size, bool is_memset)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = gen_src_value(i, is_memset);
		for ( ; i < start + len; i++)
			buf[i] = gen_src_value(i, is_memset) | PATTERN_COPY;
		for ( ; i < buf_size; i++)
			buf[i] = gen_src_value(i, is_memset);
		buf++;
	}
}

static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
		unsigned int buf_size, bool is_memset)
{
	unsigned int i;
	u8 *buf;

	for (; (buf = *bufs); bufs++) {
		for (i = 0; i < start; i++)
			buf[i] = gen_dst_value(i, is_memset);
		for ( ; i < start + len; i++)
			buf[i] = gen_dst_value(i, is_memset) |
						PATTERN_OVERWRITE;
		for ( ; i < buf_size; i++)
			buf[i] = gen_dst_value(i, is_memset);
	}
}

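/*
 * Classify a mismatched byte: any difference in a source byte means it was
 * overwritten, a destination byte missing PATTERN_COPY was never copied,
 * one carrying PATTERN_SRC was copied where it should not have been, and
 * anything else is a plain data mismatch.
 */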
static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
		unsigned int counter, bool is_srcbuf, bool is_memset)
{
	u8 diff = actual ^ pattern;
	u8 expected = pattern | gen_inv_idx(counter, is_memset);
	const char *thread_name = current->comm;

	if (is_srcbuf)
		pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if ((pattern & PATTERN_COPY)
			&& (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
		pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else if (diff & PATTERN_SRC)
		pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
	else
		pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
			thread_name, index, expected, actual);
}

static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf, bool is_memset)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | gen_inv_idx(counter, is_memset);
			if (actual != expected) {
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf,
							 is_memset);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warn("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}

static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;
	struct dmatest_thread *thread =
		container_of(done, struct dmatest_thread, test_done);

	if (!thread->done) {
		done->done = true;
		wake_up_all(done->wait);
	} else {
		/*
		 * If thread->done, it means that this callback occurred
		 * after the parent thread has cleaned up. This can
		 * happen in the case that the driver doesn't implement
		 * the terminate_all() functionality and a dma operation
		 * did not occur within the timeout period.
		 */
		WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
	}
}

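/*
 * The XOR and PQ tests force an odd source count so that the cumulative
 * XOR of the identical PATTERN_SRC bytes equals a single source byte and
 * the destination can be checked against the same expected pattern;
 * e.g. min_odd(4, 8) == 3.
 */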
static unsigned int min_odd(unsigned int x, unsigned int y)
{
	unsigned int val = min(x, y);

	return val % 2 ? val : val - 1;
}

static void result(const char *err, unsigned int n, unsigned int src_off,
		   unsigned int dst_off, unsigned int len, unsigned long data)
{
	pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		current->comm, n, err, src_off, dst_off, len, data);
}

static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
		       unsigned int dst_off, unsigned int len,
		       unsigned long data)
{
	pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
		 current->comm, n, err, src_off, dst_off, len, data);
}

#define verbose_result(err, n, src_off, dst_off, len, data) ({	\
	if (verbose)						\
		result(err, n, src_off, dst_off, len, data);	\
	else							\
		dbg_result(err, n, src_off, dst_off, len, data);\
})

static unsigned long long dmatest_persec(s64 runtime, unsigned int val)
{
	unsigned long long per_sec = 1000000;

	if (runtime <= 0)
		return 0;

	/* drop precision until runtime is 32-bits */
	while (runtime > UINT_MAX) {
		runtime >>= 1;
		per_sec <<= 1;
	}

	per_sec *= val;
	per_sec = INT_TO_FIXPT(per_sec);
	do_div(per_sec, runtime);

	return per_sec;
}

static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
{
	return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10));
}

static void __dmatest_free_test_data(struct dmatest_data *d, unsigned int cnt)
{
	unsigned int i;

	for (i = 0; i < cnt; i++)
		kfree(d->raw[i]);

	kfree(d->aligned);
	kfree(d->raw);
}

static void dmatest_free_test_data(struct dmatest_data *d)
{
	__dmatest_free_test_data(d, d->cnt);
}

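/*
 * Allocate d->cnt test buffers of buf_size bytes, over-allocating each by
 * 'align' bytes so the 'aligned' pointers can be rounded up with
 * PTR_ALIGN() to meet the device's address-alignment constraint; 'raw'
 * keeps the original pointers for kfree().
 */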
static int dmatest_alloc_test_data(struct dmatest_data *d,
		unsigned int buf_size, u8 align)
{
	unsigned int i = 0;

	d->raw = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!d->raw)
		return -ENOMEM;

	d->aligned = kcalloc(d->cnt + 1, sizeof(u8 *), GFP_KERNEL);
	if (!d->aligned)
		goto err;

	for (i = 0; i < d->cnt; i++) {
		d->raw[i] = kmalloc(buf_size + align, GFP_KERNEL);
		if (!d->raw[i])
			goto err;

		/* align to alignment restriction */
		if (align)
			d->aligned[i] = PTR_ALIGN(d->raw[i], align);
		else
			d->aligned[i] = d->raw[i];
	}

	return 0;
err:
	__dmatest_free_test_data(d, i);
	return -ENOMEM;
}

/*
 * This function repeatedly tests DMA transfers of various lengths and
 * offsets for a given operation type until it is told to exit by
 * kthread_stop(). There may be multiple threads running this function
 * in parallel for a single channel, and there may be multiple channels
 * being tested in parallel.
 *
 * Before each test, the source and destination buffers are initialized
 * with a known pattern. This pattern is different depending on
 * whether it's in an area which is supposed to be copied or
 * overwritten, and different in the source and destination buffers.
 * So if the DMA engine doesn't copy exactly what we tell it to copy,
 * we'll notice.
 */
static int dmatest_func(void *data)
{
	struct dmatest_thread	*thread = data;
	struct dmatest_done	*done = &thread->test_done;
	struct dmatest_info	*info;
	struct dmatest_params	*params;
	struct dma_chan		*chan;
	struct dma_device	*dev;
	unsigned int		error_count;
	unsigned int		failed_tests = 0;
	unsigned int		total_tests = 0;
	dma_cookie_t		cookie;
	enum dma_status		status;
	enum dma_ctrl_flags	flags;
	u8			*pq_coefs = NULL;
	int			ret;
	unsigned int		buf_size;
	struct dmatest_data	*src;
	struct dmatest_data	*dst;
	int			i;
	ktime_t			ktime, start, diff;
	ktime_t			filltime = 0;
	ktime_t			comparetime = 0;
	s64			runtime = 0;
	unsigned long long	total_len = 0;
	unsigned long long	iops = 0;
	u8			align = 0;
	bool			is_memset = false;
	dma_addr_t		*srcs;
	dma_addr_t		*dma_pq;

	set_freezable();

	ret = -ENOMEM;

	smp_rmb();
	thread->pending = false;
	info = thread->info;
	params = &info->params;
	chan = thread->chan;
	dev = chan->device;
	src = &thread->src;
	dst = &thread->dst;
	if (thread->type == DMA_MEMCPY) {
		align = params->alignment < 0 ? dev->copy_align :
						params->alignment;
		src->cnt = dst->cnt = 1;
	} else if (thread->type == DMA_MEMSET) {
		align = params->alignment < 0 ? dev->fill_align :
						params->alignment;
		src->cnt = dst->cnt = 1;
		is_memset = true;
	} else if (thread->type == DMA_XOR) {
		/* force odd to ensure dst = src */
		src->cnt = min_odd(params->xor_sources | 1, dev->max_xor);
		dst->cnt = 1;
		align = params->alignment < 0 ? dev->xor_align :
						params->alignment;
	} else if (thread->type == DMA_PQ) {
		/* force odd to ensure dst = src */
		src->cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
		dst->cnt = 2;
		align = params->alignment < 0 ? dev->pq_align :
						params->alignment;

		pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL);
		if (!pq_coefs)
			goto err_thread_type;

		for (i = 0; i < src->cnt; i++)
			pq_coefs[i] = 1;
	} else
		goto err_thread_type;

	/* Check if buffer count fits into map count variable (u8) */
	if ((src->cnt + dst->cnt) >= 255) {
		pr_err("too many buffers (%d of 255 supported)\n",
		       src->cnt + dst->cnt);
		goto err_free_coefs;
	}

	buf_size = params->buf_size;
	if (1 << align > buf_size) {
		pr_err("%u-byte buffer too small for %d-byte alignment\n",
		       buf_size, 1 << align);
		goto err_free_coefs;
	}

	if (dmatest_alloc_test_data(src, buf_size, align) < 0)
		goto err_free_coefs;

	if (dmatest_alloc_test_data(dst, buf_size, align) < 0)
		goto err_src;

	set_user_nice(current, 10);

	srcs = kcalloc(src->cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!srcs)
		goto err_dst;

	dma_pq = kcalloc(dst->cnt, sizeof(dma_addr_t), GFP_KERNEL);
	if (!dma_pq)
		goto err_srcs_array;

	/*
	 * src and dst buffers are freed by ourselves below
	 */
	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

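	/*
	 * Main test loop: pick a transfer length and offsets, (re)initialize
	 * the buffers, map them, issue a single descriptor, wait for its
	 * completion callback, and verify the data unless noverify is set.
	 */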
	ktime = ktime_get();
	while (!kthread_should_stop()
	       && !(params->iterations && total_tests >= params->iterations)) {
		struct dma_async_tx_descriptor *tx = NULL;
		struct dmaengine_unmap_data *um;
		dma_addr_t *dsts;
		unsigned int len;

		total_tests++;

		if (params->transfer_size) {
			if (params->transfer_size >= buf_size) {
				pr_err("%u-byte transfer size must be lower than %u-buffer size\n",
				       params->transfer_size, buf_size);
				break;
			}
			len = params->transfer_size;
		} else if (params->norandom) {
			len = buf_size;
		} else {
			len = dmatest_random() % buf_size + 1;
		}

		/* Do not alter transfer size explicitly defined by user */
		if (!params->transfer_size) {
			len = (len >> align) << align;
			if (!len)
				len = 1 << align;
		}
		total_len += len;

		if (params->norandom) {
			src->off = 0;
			dst->off = 0;
		} else {
			src->off = dmatest_random() % (buf_size - len + 1);
			dst->off = dmatest_random() % (buf_size - len + 1);

			src->off = (src->off >> align) << align;
			dst->off = (dst->off >> align) << align;
		}

		if (!params->noverify) {
			start = ktime_get();
			dmatest_init_srcs(src->aligned, src->off, len,
					  buf_size, is_memset);
			dmatest_init_dsts(dst->aligned, dst->off, len,
					  buf_size, is_memset);

			diff = ktime_sub(ktime_get(), start);
			filltime = ktime_add(filltime, diff);
		}

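		/*
		 * Map every buffer through one dmaengine_unmap_data so that
		 * a single dmaengine_unmap_put() releases all mappings on
		 * both the success and the error paths.
		 */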
		um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt,
					      GFP_KERNEL);
		if (!um) {
			failed_tests++;
			result("unmap data NULL", total_tests,
			       src->off, dst->off, len, ret);
			continue;
		}

		um->len = buf_size;
		for (i = 0; i < src->cnt; i++) {
			void *buf = src->aligned[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);

			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
						   um->len, DMA_TO_DEVICE);
			srcs[i] = um->addr[i] + src->off;
			ret = dma_mapping_error(dev->dev, um->addr[i]);
			if (ret) {
				result("src mapping error", total_tests,
				       src->off, dst->off, len, ret);
				goto error_unmap_continue;
			}
			um->to_cnt++;
		}
		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
		dsts = &um->addr[src->cnt];
		for (i = 0; i < dst->cnt; i++) {
			void *buf = dst->aligned[i];
			struct page *pg = virt_to_page(buf);
			unsigned long pg_off = offset_in_page(buf);

			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
					       DMA_BIDIRECTIONAL);
			ret = dma_mapping_error(dev->dev, dsts[i]);
			if (ret) {
				result("dst mapping error", total_tests,
				       src->off, dst->off, len, ret);
				goto error_unmap_continue;
			}
			um->bidi_cnt++;
		}

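		/*
		 * Build one descriptor for this iteration using the prep
		 * hook that matches the thread's test type.
		 */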
		if (thread->type == DMA_MEMCPY)
			tx = dev->device_prep_dma_memcpy(chan,
							 dsts[0] + dst->off,
							 srcs[0], len, flags);
		else if (thread->type == DMA_MEMSET)
			tx = dev->device_prep_dma_memset(chan,
						dsts[0] + dst->off,
						*(src->aligned[0] + src->off),
						len, flags);
		else if (thread->type == DMA_XOR)
			tx = dev->device_prep_dma_xor(chan,
						      dsts[0] + dst->off,
						      srcs, src->cnt,
						      len, flags);
		else if (thread->type == DMA_PQ) {
			for (i = 0; i < dst->cnt; i++)
				dma_pq[i] = dsts[i] + dst->off;
			tx = dev->device_prep_dma_pq(chan, dma_pq, srcs,
						     src->cnt, pq_coefs,
						     len, flags);
		}

		if (!tx) {
			result("prep error", total_tests, src->off,
			       dst->off, len, ret);
			msleep(100);
			goto error_unmap_continue;
		}

		done->done = false;
		tx->callback = dmatest_callback;
		tx->callback_param = done;
		cookie = tx->tx_submit(tx);

		if (dma_submit_error(cookie)) {
			result("submit error", total_tests, src->off,
			       dst->off, len, ret);
			msleep(100);
			goto error_unmap_continue;
		}
		dma_async_issue_pending(chan);

		wait_event_freezable_timeout(thread->done_wait, done->done,
					     msecs_to_jiffies(params->timeout));

		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

		if (!done->done) {
			result("test timed out", total_tests, src->off, dst->off,
			       len, 0);
			goto error_unmap_continue;
		} else if (status != DMA_COMPLETE) {
			result(status == DMA_ERROR ?
			       "completion error status" :
			       "completion busy status", total_tests, src->off,
			       dst->off, len, ret);
			goto error_unmap_continue;
		}

		dmaengine_unmap_put(um);

		if (params->noverify) {
			verbose_result("test passed", total_tests, src->off,
				       dst->off, len, 0);
			continue;
		}

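		/*
		 * Verify all three regions of each buffer: the untouched
		 * head before the offset, the transferred middle, and the
		 * untouched tail, for the source and destination alike.
		 */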
		start = ktime_get();
		pr_debug("%s: verifying source buffer...\n", current->comm);
		error_count = dmatest_verify(src->aligned, 0, src->off,
				0, PATTERN_SRC, true, is_memset);
		error_count += dmatest_verify(src->aligned, src->off,
				src->off + len, src->off,
				PATTERN_SRC | PATTERN_COPY, true, is_memset);
		error_count += dmatest_verify(src->aligned, src->off + len,
				buf_size, src->off + len,
				PATTERN_SRC, true, is_memset);

		pr_debug("%s: verifying dest buffer...\n", current->comm);
		error_count += dmatest_verify(dst->aligned, 0, dst->off,
				0, PATTERN_DST, false, is_memset);

		error_count += dmatest_verify(dst->aligned, dst->off,
				dst->off + len, src->off,
				PATTERN_SRC | PATTERN_COPY, false, is_memset);

		error_count += dmatest_verify(dst->aligned, dst->off + len,
				buf_size, dst->off + len,
				PATTERN_DST, false, is_memset);

		diff = ktime_sub(ktime_get(), start);
		comparetime = ktime_add(comparetime, diff);

		if (error_count) {
			result("data error", total_tests, src->off, dst->off,
			       len, error_count);
			failed_tests++;
		} else {
			verbose_result("test passed", total_tests, src->off,
				       dst->off, len, 0);
		}

		continue;

error_unmap_continue:
		dmaengine_unmap_put(um);
		failed_tests++;
	}
	ktime = ktime_sub(ktime_get(), ktime);
	ktime = ktime_sub(ktime, comparetime);
	ktime = ktime_sub(ktime, filltime);
	runtime = ktime_to_us(ktime);

	ret = 0;
	kfree(dma_pq);
err_srcs_array:
	kfree(srcs);
err_dst:
	dmatest_free_test_data(dst);
err_src:
	dmatest_free_test_data(src);
err_free_coefs:
	kfree(pq_coefs);
err_thread_type:
	iops = dmatest_persec(runtime, total_tests);
	pr_info("%s: summary %u tests, %u failures %llu.%02llu iops %llu KB/s (%d)\n",
		current->comm, total_tests, failed_tests,
		FIXPT_TO_INT(iops), FIXPT_GET_FRAC(iops),
		dmatest_KBs(runtime, total_len), ret);

	/* terminate all transfers on specified channels */
	if (ret || failed_tests)
		dmaengine_terminate_sync(chan);

	thread->done = true;
	wake_up(&thread_wait);

	return ret;
}

static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
{
	struct dmatest_thread	*thread;
	struct dmatest_thread	*_thread;
	int			ret;

	list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
		ret = kthread_stop(thread->task);
		pr_debug("thread %s exited with status %d\n",
			 thread->task->comm, ret);
		list_del(&thread->node);
		put_task_struct(thread->task);
		kfree(thread);
	}

	/* terminate all transfers on specified channels */
	dmaengine_terminate_sync(dtc->chan);

	kfree(dtc);
}

static int dmatest_add_threads(struct dmatest_info *info,
		struct dmatest_chan *dtc, enum dma_transaction_type type)
{
	struct dmatest_params *params = &info->params;
	struct dmatest_thread *thread;
	struct dma_chan *chan = dtc->chan;
	char *op;
	unsigned int i;

	if (type == DMA_MEMCPY)
		op = "copy";
	else if (type == DMA_MEMSET)
		op = "set";
	else if (type == DMA_XOR)
		op = "xor";
	else if (type == DMA_PQ)
		op = "pq";
	else
		return -EINVAL;

	for (i = 0; i < params->threads_per_chan; i++) {
		thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
		if (!thread) {
			pr_warn("No memory for %s-%s%u\n",
				dma_chan_name(chan), op, i);
			break;
		}
		thread->info = info;
		thread->chan = dtc->chan;
		thread->type = type;
		thread->test_done.wait = &thread->done_wait;
		init_waitqueue_head(&thread->done_wait);
		smp_wmb();
		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
				dma_chan_name(chan), op, i);
		if (IS_ERR(thread->task)) {
			pr_warn("Failed to create thread %s-%s%u\n",
				dma_chan_name(chan), op, i);
			kfree(thread);
			break;
		}

		/* srcbuf and dstbuf are allocated by the thread itself */
		get_task_struct(thread->task);
		list_add_tail(&thread->node, &dtc->threads);
		thread->pending = true;
	}

	return i;
}

static int dmatest_add_channel(struct dmatest_info *info,
		struct dma_chan *chan)
{
	struct dmatest_chan	*dtc;
	struct dma_device	*dma_dev = chan->device;
	unsigned int		thread_count = 0;
	int cnt;

	dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
	if (!dtc) {
		pr_warn("No memory for %s\n", dma_chan_name(chan));
		return -ENOMEM;
	}

	dtc->chan = chan;
	INIT_LIST_HEAD(&dtc->threads);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		if (dmatest == 0) {
			cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
		if (dmatest == 1) {
			cnt = dmatest_add_threads(info, dtc, DMA_MEMSET);
			thread_count += cnt > 0 ? cnt : 0;
		}
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_XOR);
		thread_count += cnt > 0 ? cnt : 0;
	}
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		cnt = dmatest_add_threads(info, dtc, DMA_PQ);
		thread_count += cnt > 0 ? cnt : 0;
	}

	pr_info("Added %u threads using %s\n",
		thread_count, dma_chan_name(chan));

	list_add_tail(&dtc->node, &info->channels);
	info->nr_channels++;

	return 0;
}

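/*
 * dma_request_channel() filter: accept only channels whose name and device
 * match the user-supplied "channel" and "device" parameters.
 */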
static bool filter(struct dma_chan *chan, void *param)
{
	struct dmatest_params *params = param;

	if (!dmatest_match_channel(params, chan) ||
	    !dmatest_match_device(params, chan->device))
		return false;
	else
		return true;
}

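/*
 * Grab channels with the requested capability that pass filter(), up to
 * max_channels, and register test threads on each one.
 */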
static void request_channels(struct dmatest_info *info,
			     enum dma_transaction_type type)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(type, mask);
	for (;;) {
		struct dmatest_params *params = &info->params;
		struct dma_chan *chan;

		chan = dma_request_channel(mask, filter, params);
		if (chan) {
			if (dmatest_add_channel(info, chan)) {
				dma_release_channel(chan);
				break; /* add_channel failed, punt */
			}
		} else
			break; /* no more channels available */
		if (params->max_channels &&
		    info->nr_channels >= params->max_channels)
			break; /* we have all we need */
	}
}

Seraj Alijand53513d2018-12-10 08:52:31 +00001050static void add_threaded_test(struct dmatest_info *info)
Andy Shevchenko851b7e12013-03-04 11:09:30 +02001051{
1052 struct dmatest_params *params = &info->params;
Andy Shevchenko851b7e12013-03-04 11:09:30 +02001053
Andy Shevchenko851b7e12013-03-04 11:09:30 +02001054 /* Copy test parameters */
Andy Shevchenkoa6c268d2013-07-23 18:36:46 +03001055 params->buf_size = test_buf_size;
1056 strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
1057 strlcpy(params->device, strim(test_device), sizeof(params->device));
1058 params->threads_per_chan = threads_per_chan;
1059 params->max_channels = max_channels;
1060 params->iterations = iterations;
1061 params->xor_sources = xor_sources;
1062 params->pq_sources = pq_sources;
1063 params->timeout = timeout;
Dan Williamse3b9c342013-11-06 16:30:05 -08001064 params->noverify = noverify;
Yang Shunyong2e67a082018-02-02 17:51:09 +08001065 params->norandom = norandom;
Seraj Alijana875abf2018-12-10 08:52:37 +00001066 params->alignment = alignment;
Seraj Alijan13396a12018-12-10 08:52:39 +00001067 params->transfer_size = transfer_size;
Dan Williamsa310d032013-11-06 16:30:01 -08001068
Dan Williamsa9e55492013-11-06 16:30:02 -08001069 request_channels(info, DMA_MEMCPY);
Sinan Kaya61b5f542017-06-29 22:30:58 -04001070 request_channels(info, DMA_MEMSET);
Dan Williamsa9e55492013-11-06 16:30:02 -08001071 request_channels(info, DMA_XOR);
1072 request_channels(info, DMA_PQ);
Haavard Skinnemoen4a776f02008-07-08 11:58:45 -07001073}
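
/*
 * Example (illustrative only, not part of this file's logic): a one-shot
 * run at module load time, exercising the parameters copied above, might
 * look like this; the channel name "dma0chan0" is platform-specific:
 *
 *      modprobe dmatest timeout=2000 iterations=5 channel=dma0chan0 run=1
 *
 * Omitting "channel=" lets filter() accept any capable channel.
 */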

static void run_pending_tests(struct dmatest_info *info)
{
        struct dmatest_chan *dtc;
        unsigned int thread_count = 0;

        list_for_each_entry(dtc, &info->channels, node) {
                struct dmatest_thread *thread;

                thread_count = 0;
                list_for_each_entry(thread, &dtc->threads, node) {
                        /* Threads are created parked; waking one starts its transfers */
                        wake_up_process(thread->task);
                        thread_count++;
                }
                pr_info("Started %u threads using %s\n",
                        thread_count, dma_chan_name(dtc->chan));
        }
}

static void stop_threaded_test(struct dmatest_info *info)
{
        struct dmatest_chan *dtc, *_dtc;
        struct dma_chan *chan;

        list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
                list_del(&dtc->node);
                chan = dtc->chan;
                dmatest_cleanup_channel(dtc);
                pr_debug("dropped channel %s\n", dma_chan_name(chan));
                dma_release_channel(chan);
        }

        info->nr_channels = 0;
}

static void start_threaded_tests(struct dmatest_info *info)
{
        /* We might be called early to set run=; defer running until all
         * parameters have been evaluated.
         */
        if (!info->did_init)
                return;

        run_pending_tests(info);
}

static int dmatest_run_get(char *val, const struct kernel_param *kp)
{
        struct dmatest_info *info = &test_info;

        mutex_lock(&info->lock);
        if (is_threaded_test_run(info)) {
                dmatest_run = true;
        } else {
                if (!is_threaded_test_pending(info))
                        stop_threaded_test(info);
                dmatest_run = false;
        }
        mutex_unlock(&info->lock);

        return param_get_bool(val, kp);
}

static int dmatest_run_set(const char *val, const struct kernel_param *kp)
{
        struct dmatest_info *info = &test_info;
        int ret;

        mutex_lock(&info->lock);
        ret = param_set_bool(val, kp);
        if (ret) {
                mutex_unlock(&info->lock);
                return ret;
        } else if (dmatest_run) {
                if (is_threaded_test_pending(info))
                        start_threaded_tests(info);
                else
                        pr_info("Could not start test, no channels configured\n");
        } else {
                stop_threaded_test(info);
        }

        mutex_unlock(&info->lock);

        return ret;
}
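
/*
 * The get/set handlers above back the "run" module parameter, so a test
 * can be driven from userspace once the module is loaded, e.g.
 * (illustrative):
 *
 *      echo 1 > /sys/module/dmatest/parameters/run
 *      cat /sys/module/dmatest/parameters/run
 *
 * Reading "run" reports whether test threads are still executing.
 */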

static int dmatest_chan_set(const char *val, const struct kernel_param *kp)
{
        struct dmatest_info *info = &test_info;
        struct dmatest_chan *dtc;
        char chan_reset_val[20];
        int ret = 0;

        mutex_lock(&info->lock);
        ret = param_set_copystring(val, kp);
        if (ret) {
                mutex_unlock(&info->lock);
                return ret;
        }
        /* Clear any previously run threads */
        if (!is_threaded_test_run(info) && !is_threaded_test_pending(info))
                stop_threaded_test(info);
        /* Reject channels that are already registered */
        if (is_threaded_test_pending(info)) {
                list_for_each_entry(dtc, &info->channels, node) {
                        if (strcmp(dma_chan_name(dtc->chan),
                                   strim(test_channel)) == 0) {
                                dtc = list_last_entry(&info->channels,
                                                      struct dmatest_chan,
                                                      node);
                                strlcpy(chan_reset_val,
                                        dma_chan_name(dtc->chan),
                                        sizeof(chan_reset_val));
                                ret = -EBUSY;
                                goto add_chan_err;
                        }
                }
        }

        add_threaded_test(info);

        /* Check if channel was added successfully */
        dtc = list_last_entry(&info->channels, struct dmatest_chan, node);

        if (dtc->chan) {
                /*
                 * If the new channel was not successfully added, revert the
                 * "test_channel" string to the name of the last successfully
                 * added channel, except when the user issued an empty string
                 * as the channel parameter.
                 */
                if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0)
                    && (strcmp("", strim(test_channel)) != 0)) {
                        ret = -EINVAL;
                        strlcpy(chan_reset_val, dma_chan_name(dtc->chan),
                                sizeof(chan_reset_val));
                        goto add_chan_err;
                }

        } else {
                /* Clear test_channel if no channels were added successfully */
                strlcpy(chan_reset_val, "", sizeof(chan_reset_val));
                ret = -EBUSY;
                goto add_chan_err;
        }

        mutex_unlock(&info->lock);

        return ret;

add_chan_err:
        param_set_copystring(chan_reset_val, kp);
        mutex_unlock(&info->lock);

        return ret;
}
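
/*
 * Multiple channels can be queued before triggering a run by writing
 * "channel" repeatedly; the names below are platform-specific examples:
 *
 *      echo dma0chan0 > /sys/module/dmatest/parameters/channel
 *      echo dma0chan1 > /sys/module/dmatest/parameters/channel
 *      echo 1 > /sys/module/dmatest/parameters/run
 */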

static int dmatest_chan_get(char *val, const struct kernel_param *kp)
{
        struct dmatest_info *info = &test_info;

        mutex_lock(&info->lock);
        if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) {
                stop_threaded_test(info);
                strlcpy(test_channel, "", sizeof(test_channel));
        }
        mutex_unlock(&info->lock);

        return param_get_string(val, kp);
}

static int dmatest_test_list_get(char *val, const struct kernel_param *kp)
{
        struct dmatest_info *info = &test_info;
        struct dmatest_chan *dtc;
        unsigned int thread_count = 0;

        list_for_each_entry(dtc, &info->channels, node) {
                struct dmatest_thread *thread;

                thread_count = 0;
                list_for_each_entry(thread, &dtc->threads, node) {
                        thread_count++;
                }
                pr_info("%u threads using %s\n",
                        thread_count, dma_chan_name(dtc->chan));
        }

        return 0;
}
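
/*
 * Reading "test_list" logs the pending configuration rather than
 * returning it; with the pr_fmt() prefix, the output resembles
 * (example only):
 *
 *      dmatest: 1 threads using dma0chan0
 */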

static int __init dmatest_init(void)
{
        struct dmatest_info *info = &test_info;
        struct dmatest_params *params = &info->params;

        if (dmatest_run) {
                mutex_lock(&info->lock);
                add_threaded_test(info);
                run_pending_tests(info);
                mutex_unlock(&info->lock);
        }

        if (params->iterations && wait)
                wait_event(thread_wait, !is_threaded_test_run(info));

        /* Module parameters are stable and init-time tests are started;
         * let userspace take over 'run' control.
         */
        info->did_init = true;

        return 0;
}
/* when compiled in, wait for drivers to load first */
late_initcall(dmatest_init);

static void __exit dmatest_exit(void)
{
        struct dmatest_info *info = &test_info;

        mutex_lock(&info->lock);
        stop_threaded_test(info);
        mutex_unlock(&info->lock);
}
module_exit(dmatest_exit);

MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_LICENSE("GPL v2");