dmatest: replace stored results mechanism with uniform messages
[linux-drm-fsl-dcu.git] / drivers / dma / dmatest.c
1 /*
2  * DMA Engine test module
3  *
4  * Copyright (C) 2007 Atmel Corporation
5  * Copyright (C) 2013 Intel Corporation
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/delay.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/dmaengine.h>
16 #include <linux/freezer.h>
17 #include <linux/init.h>
18 #include <linux/kthread.h>
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/random.h>
22 #include <linux/slab.h>
23 #include <linux/wait.h>
24 #include <linux/ctype.h>
25 #include <linux/debugfs.h>
26 #include <linux/uaccess.h>
27 #include <linux/seq_file.h>
28
/*
 * Module parameters.  All are writable at runtime (S_IWUSR); a run started
 * later snapshots them into struct dmatest_params (__restart_threaded_test).
 */
static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");

static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

static char test_device[20];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");

static unsigned int threads_per_chan = 1;
module_param(threads_per_chan, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(threads_per_chan,
		"Number of threads to start per channel (default: 1)");

static unsigned int max_channels;
module_param(max_channels, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channels,
		"Maximum number of channels to use (default: all)");

static unsigned int iterations;
module_param(iterations, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(iterations,
		"Iterations before stopping test (default: infinite)");

static unsigned int xor_sources = 3;
module_param(xor_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xor_sources,
		"Number of xor source buffers (default: 3)");

static unsigned int pq_sources = 3;
module_param(pq_sources, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pq_sources,
		"Number of p+q source buffers (default: 3)");

/*
 * NOTE(review): declared as int but registered with the "uint" param ops,
 * so "-1" reads back as UINT_MAX through sysfs — confirm this mismatch
 * is intentional.
 */
static int timeout = 3000;
module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
		 "Pass -1 for infinite timeout");

/* Maximum amount of mismatched bytes in buffer to print */
#define MAX_ERROR_COUNT		32

/*
 * Initialization patterns. All bytes in the source buffer has bit 7
 * set, all bytes in the destination buffer has bit 7 cleared.
 *
 * Bit 6 is set for all bytes which are to be copied by the DMA
 * engine. Bit 5 is set for all bytes which are to be overwritten by
 * the DMA engine.
 *
 * The remaining bits are the inverse of a counter which increments by
 * one for each byte address.
 */
#define PATTERN_SRC		0x80
#define PATTERN_DST		0x00
#define PATTERN_COPY		0x40
#define PATTERN_OVERWRITE	0x20
#define PATTERN_COUNT_MASK	0x1f
92
struct dmatest_info;

/* Per-kthread test state; one of these per thread started on a channel. */
struct dmatest_thread {
	struct list_head	node;	/* link in dmatest_chan.threads */
	struct dmatest_info	*info;	/* owning test context */
	struct task_struct	*task;	/* kthread running dmatest_func() */
	struct dma_chan		*chan;	/* channel under test */
	u8			**srcs;	/* NULL-terminated source buffers */
	u8			**dsts;	/* NULL-terminated destination buffers */
	enum dma_transaction_type type;	/* DMA_MEMCPY, DMA_XOR or DMA_PQ */
	bool			done;	/* set by the thread before it parks */
};

/* One channel under test plus the threads exercising it. */
struct dmatest_chan {
	struct list_head	node;	/* link in dmatest_info.channels */
	struct dma_chan		*chan;
	struct list_head	threads;	/* list of dmatest_thread */
};

/**
 * struct dmatest_params - test parameters.
 * @buf_size:		size of the memcpy test buffer
 * @channel:		bus ID of the channel to test
 * @device:		bus ID of the DMA Engine to test
 * @threads_per_chan:	number of threads to start per channel
 * @max_channels:	maximum number of channels to use
 * @iterations:		iterations before stopping test
 * @xor_sources:	number of xor source buffers
 * @pq_sources:		number of p+q source buffers
 * @timeout:		transfer timeout in msec, -1 for infinite timeout
 */
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
	char		device[20];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
	unsigned int	xor_sources;
	unsigned int	pq_sources;
	int		timeout;
};

/**
 * struct dmatest_info - test information.
 * @params:		test parameters
 * @lock:		access protection to the fields of this structure
 */
struct dmatest_info {
	/* Test parameters */
	struct dmatest_params	params;

	/* Internal state */
	struct list_head	channels;	/* list of dmatest_chan */
	unsigned int		nr_channels;
	struct mutex		lock;

	/* debugfs related stuff */
	struct dentry		*root;
};

/* Single module-wide test context. */
static struct dmatest_info test_info;
155
156 static bool dmatest_match_channel(struct dmatest_params *params,
157                 struct dma_chan *chan)
158 {
159         if (params->channel[0] == '\0')
160                 return true;
161         return strcmp(dma_chan_name(chan), params->channel) == 0;
162 }
163
164 static bool dmatest_match_device(struct dmatest_params *params,
165                 struct dma_device *device)
166 {
167         if (params->device[0] == '\0')
168                 return true;
169         return strcmp(dev_name(device->dev), params->device) == 0;
170 }
171
/* Return an unsigned long of random bits from the kernel RNG. */
static unsigned long dmatest_random(void)
{
	unsigned long val;

	get_random_bytes(&val, sizeof(val));

	return val;
}
179
180 static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len,
181                 unsigned int buf_size)
182 {
183         unsigned int i;
184         u8 *buf;
185
186         for (; (buf = *bufs); bufs++) {
187                 for (i = 0; i < start; i++)
188                         buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
189                 for ( ; i < start + len; i++)
190                         buf[i] = PATTERN_SRC | PATTERN_COPY
191                                 | (~i & PATTERN_COUNT_MASK);
192                 for ( ; i < buf_size; i++)
193                         buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK);
194                 buf++;
195         }
196 }
197
198 static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len,
199                 unsigned int buf_size)
200 {
201         unsigned int i;
202         u8 *buf;
203
204         for (; (buf = *bufs); bufs++) {
205                 for (i = 0; i < start; i++)
206                         buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
207                 for ( ; i < start + len; i++)
208                         buf[i] = PATTERN_DST | PATTERN_OVERWRITE
209                                 | (~i & PATTERN_COUNT_MASK);
210                 for ( ; i < buf_size; i++)
211                         buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK);
212         }
213 }
214
215 static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index,
216                 unsigned int counter, bool is_srcbuf)
217 {
218         u8              diff = actual ^ pattern;
219         u8              expected = pattern | (~counter & PATTERN_COUNT_MASK);
220         const char      *thread_name = current->comm;
221
222         if (is_srcbuf)
223                 pr_warn("%s: srcbuf[0x%x] overwritten! Expected %02x, got %02x\n",
224                         thread_name, index, expected, actual);
225         else if ((pattern & PATTERN_COPY)
226                         && (diff & (PATTERN_COPY | PATTERN_OVERWRITE)))
227                 pr_warn("%s: dstbuf[0x%x] not copied! Expected %02x, got %02x\n",
228                         thread_name, index, expected, actual);
229         else if (diff & PATTERN_SRC)
230                 pr_warn("%s: dstbuf[0x%x] was copied! Expected %02x, got %02x\n",
231                         thread_name, index, expected, actual);
232         else
233                 pr_warn("%s: dstbuf[0x%x] mismatch! Expected %02x, got %02x\n",
234                         thread_name, index, expected, actual);
235 }
236
/*
 * Check indices [start, end) of every buffer in the NULL-terminated @bufs
 * array against @pattern combined with the inverted running @counter.
 * Mismatches are reported via dmatest_mismatch() up to MAX_ERROR_COUNT
 * times; the return value is the total mismatch count including the
 * suppressed ones.
 */
static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
		unsigned int end, unsigned int counter, u8 pattern,
		bool is_srcbuf)
{
	unsigned int i;
	unsigned int error_count = 0;
	u8 actual;
	u8 expected;
	u8 *buf;
	unsigned int counter_orig = counter;

	for (; (buf = *bufs); bufs++) {
		/* Every buffer is checked against the same counter run. */
		counter = counter_orig;
		for (i = start; i < end; i++) {
			actual = buf[i];
			expected = pattern | (~counter & PATTERN_COUNT_MASK);
			if (actual != expected) {
				/* Only print the first MAX_ERROR_COUNT hits. */
				if (error_count < MAX_ERROR_COUNT)
					dmatest_mismatch(actual, pattern, i,
							 counter, is_srcbuf);
				error_count++;
			}
			counter++;
		}
	}

	if (error_count > MAX_ERROR_COUNT)
		pr_warn("%s: %u errors suppressed\n",
			current->comm, error_count - MAX_ERROR_COUNT);

	return error_count;
}
269
/* poor man's completion - we want to use wait_event_freezable() on it */
struct dmatest_done {
	bool			done;	/* set true by dmatest_callback() */
	wait_queue_head_t	*wait;	/* woken when the transfer completes */
};
275
/* DMA completion callback: mark the transfer done and wake the waiter. */
static void dmatest_callback(void *arg)
{
	struct dmatest_done *done = arg;

	done->done = true;
	wake_up_all(done->wait);
}
283
284 static inline void unmap_src(struct device *dev, dma_addr_t *addr, size_t len,
285                              unsigned int count)
286 {
287         while (count--)
288                 dma_unmap_single(dev, addr[count], len, DMA_TO_DEVICE);
289 }
290
291 static inline void unmap_dst(struct device *dev, dma_addr_t *addr, size_t len,
292                              unsigned int count)
293 {
294         while (count--)
295                 dma_unmap_single(dev, addr[count], len, DMA_BIDIRECTIONAL);
296 }
297
298 static unsigned int min_odd(unsigned int x, unsigned int y)
299 {
300         unsigned int val = min(x, y);
301
302         return val % 2 ? val : val - 1;
303 }
304
305 static void result(const char *err, unsigned int n, unsigned int src_off,
306                    unsigned int dst_off, unsigned int len, unsigned long data)
307 {
308         pr_info("%s: result #%u: '%s' with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)",
309                 current->comm, n, err, src_off, dst_off, len, data);
310 }
311
312 static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
313                        unsigned int dst_off, unsigned int len,
314                        unsigned long data)
315 {
316         pr_debug("%s: result #%u: '%s' with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)",
317                  current->comm, n, err, src_off, dst_off, len, data);
318 }
319
320 /*
321  * This function repeatedly tests DMA transfers of various lengths and
322  * offsets for a given operation type until it is told to exit by
323  * kthread_stop(). There may be multiple threads running this function
324  * in parallel for a single channel, and there may be multiple channels
325  * being tested in parallel.
326  *
327  * Before each test, the source and destination buffer is initialized
328  * with a known pattern. This pattern is different depending on
329  * whether it's in an area which is supposed to be copied or
330  * overwritten, and different in the source and destination buffers.
331  * So if the DMA engine doesn't copy exactly what we tell it to copy,
332  * we'll notice.
333  */
334 static int dmatest_func(void *data)
335 {
336         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
337         struct dmatest_thread   *thread = data;
338         struct dmatest_done     done = { .wait = &done_wait };
339         struct dmatest_info     *info;
340         struct dmatest_params   *params;
341         struct dma_chan         *chan;
342         struct dma_device       *dev;
343         unsigned int            src_off, dst_off, len;
344         unsigned int            error_count;
345         unsigned int            failed_tests = 0;
346         unsigned int            total_tests = 0;
347         dma_cookie_t            cookie;
348         enum dma_status         status;
349         enum dma_ctrl_flags     flags;
350         u8                      *pq_coefs = NULL;
351         int                     ret;
352         int                     src_cnt;
353         int                     dst_cnt;
354         int                     i;
355
356         set_freezable();
357
358         ret = -ENOMEM;
359
360         smp_rmb();
361         info = thread->info;
362         params = &info->params;
363         chan = thread->chan;
364         dev = chan->device;
365         if (thread->type == DMA_MEMCPY)
366                 src_cnt = dst_cnt = 1;
367         else if (thread->type == DMA_XOR) {
368                 /* force odd to ensure dst = src */
369                 src_cnt = min_odd(params->xor_sources | 1, dev->max_xor);
370                 dst_cnt = 1;
371         } else if (thread->type == DMA_PQ) {
372                 /* force odd to ensure dst = src */
373                 src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0));
374                 dst_cnt = 2;
375
376                 pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL);
377                 if (!pq_coefs)
378                         goto err_thread_type;
379
380                 for (i = 0; i < src_cnt; i++)
381                         pq_coefs[i] = 1;
382         } else
383                 goto err_thread_type;
384
385         thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL);
386         if (!thread->srcs)
387                 goto err_srcs;
388         for (i = 0; i < src_cnt; i++) {
389                 thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL);
390                 if (!thread->srcs[i])
391                         goto err_srcbuf;
392         }
393         thread->srcs[i] = NULL;
394
395         thread->dsts = kcalloc(dst_cnt+1, sizeof(u8 *), GFP_KERNEL);
396         if (!thread->dsts)
397                 goto err_dsts;
398         for (i = 0; i < dst_cnt; i++) {
399                 thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL);
400                 if (!thread->dsts[i])
401                         goto err_dstbuf;
402         }
403         thread->dsts[i] = NULL;
404
405         set_user_nice(current, 10);
406
407         /*
408          * src and dst buffers are freed by ourselves below
409          */
410         flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
411
412         while (!kthread_should_stop()
413                && !(params->iterations && total_tests >= params->iterations)) {
414                 struct dma_async_tx_descriptor *tx = NULL;
415                 dma_addr_t dma_srcs[src_cnt];
416                 dma_addr_t dma_dsts[dst_cnt];
417                 u8 align = 0;
418
419                 total_tests++;
420
421                 /* honor alignment restrictions */
422                 if (thread->type == DMA_MEMCPY)
423                         align = dev->copy_align;
424                 else if (thread->type == DMA_XOR)
425                         align = dev->xor_align;
426                 else if (thread->type == DMA_PQ)
427                         align = dev->pq_align;
428
429                 if (1 << align > params->buf_size) {
430                         pr_err("%u-byte buffer too small for %d-byte alignment\n",
431                                params->buf_size, 1 << align);
432                         break;
433                 }
434
435                 len = dmatest_random() % params->buf_size + 1;
436                 len = (len >> align) << align;
437                 if (!len)
438                         len = 1 << align;
439                 src_off = dmatest_random() % (params->buf_size - len + 1);
440                 dst_off = dmatest_random() % (params->buf_size - len + 1);
441
442                 src_off = (src_off >> align) << align;
443                 dst_off = (dst_off >> align) << align;
444
445                 dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size);
446                 dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size);
447
448                 for (i = 0; i < src_cnt; i++) {
449                         u8 *buf = thread->srcs[i] + src_off;
450
451                         dma_srcs[i] = dma_map_single(dev->dev, buf, len,
452                                                      DMA_TO_DEVICE);
453                         ret = dma_mapping_error(dev->dev, dma_srcs[i]);
454                         if (ret) {
455                                 unmap_src(dev->dev, dma_srcs, len, i);
456                                 result("src mapping error", total_tests,
457                                        src_off, dst_off, len, ret);
458                                 failed_tests++;
459                                 continue;
460                         }
461                 }
462                 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
463                 for (i = 0; i < dst_cnt; i++) {
464                         dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i],
465                                                      params->buf_size,
466                                                      DMA_BIDIRECTIONAL);
467                         ret = dma_mapping_error(dev->dev, dma_dsts[i]);
468                         if (ret) {
469                                 unmap_src(dev->dev, dma_srcs, len, src_cnt);
470                                 unmap_dst(dev->dev, dma_dsts, params->buf_size,
471                                           i);
472                                 result("dst mapping error", total_tests,
473                                        src_off, dst_off, len, ret);
474                                 failed_tests++;
475                                 continue;
476                         }
477                 }
478
479                 if (thread->type == DMA_MEMCPY)
480                         tx = dev->device_prep_dma_memcpy(chan,
481                                                          dma_dsts[0] + dst_off,
482                                                          dma_srcs[0], len,
483                                                          flags);
484                 else if (thread->type == DMA_XOR)
485                         tx = dev->device_prep_dma_xor(chan,
486                                                       dma_dsts[0] + dst_off,
487                                                       dma_srcs, src_cnt,
488                                                       len, flags);
489                 else if (thread->type == DMA_PQ) {
490                         dma_addr_t dma_pq[dst_cnt];
491
492                         for (i = 0; i < dst_cnt; i++)
493                                 dma_pq[i] = dma_dsts[i] + dst_off;
494                         tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
495                                                      src_cnt, pq_coefs,
496                                                      len, flags);
497                 }
498
499                 if (!tx) {
500                         unmap_src(dev->dev, dma_srcs, len, src_cnt);
501                         unmap_dst(dev->dev, dma_dsts, params->buf_size,
502                                   dst_cnt);
503                         result("prep error", total_tests, src_off,
504                                dst_off, len, ret);
505                         msleep(100);
506                         failed_tests++;
507                         continue;
508                 }
509
510                 done.done = false;
511                 tx->callback = dmatest_callback;
512                 tx->callback_param = &done;
513                 cookie = tx->tx_submit(tx);
514
515                 if (dma_submit_error(cookie)) {
516                         result("submit error", total_tests, src_off,
517                                dst_off, len, ret);
518                         msleep(100);
519                         failed_tests++;
520                         continue;
521                 }
522                 dma_async_issue_pending(chan);
523
524                 wait_event_freezable_timeout(done_wait, done.done,
525                                              msecs_to_jiffies(params->timeout));
526
527                 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
528
529                 if (!done.done) {
530                         /*
531                          * We're leaving the timed out dma operation with
532                          * dangling pointer to done_wait.  To make this
533                          * correct, we'll need to allocate wait_done for
534                          * each test iteration and perform "who's gonna
535                          * free it this time?" dancing.  For now, just
536                          * leave it dangling.
537                          */
538                         result("test timed out", total_tests, src_off, dst_off,
539                                len, 0);
540                         failed_tests++;
541                         continue;
542                 } else if (status != DMA_SUCCESS) {
543                         result(status == DMA_ERROR ?
544                                "completion error status" :
545                                "completion busy status", total_tests, src_off,
546                                dst_off, len, ret);
547                         failed_tests++;
548                         continue;
549                 }
550
551                 /* Unmap by myself */
552                 unmap_src(dev->dev, dma_srcs, len, src_cnt);
553                 unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt);
554
555                 error_count = 0;
556
557                 pr_debug("%s: verifying source buffer...\n", current->comm);
558                 error_count += dmatest_verify(thread->srcs, 0, src_off,
559                                 0, PATTERN_SRC, true);
560                 error_count += dmatest_verify(thread->srcs, src_off,
561                                 src_off + len, src_off,
562                                 PATTERN_SRC | PATTERN_COPY, true);
563                 error_count += dmatest_verify(thread->srcs, src_off + len,
564                                 params->buf_size, src_off + len,
565                                 PATTERN_SRC, true);
566
567                 pr_debug("%s: verifying dest buffer...\n", current->comm);
568                 error_count += dmatest_verify(thread->dsts, 0, dst_off,
569                                 0, PATTERN_DST, false);
570                 error_count += dmatest_verify(thread->dsts, dst_off,
571                                 dst_off + len, src_off,
572                                 PATTERN_SRC | PATTERN_COPY, false);
573                 error_count += dmatest_verify(thread->dsts, dst_off + len,
574                                 params->buf_size, dst_off + len,
575                                 PATTERN_DST, false);
576
577                 if (error_count) {
578                         result("data error", total_tests, src_off, dst_off,
579                                len, error_count);
580                         failed_tests++;
581                 } else {
582                         dbg_result("test passed", total_tests, src_off, dst_off,
583                                    len, 0);
584                 }
585         }
586
587         ret = 0;
588         for (i = 0; thread->dsts[i]; i++)
589                 kfree(thread->dsts[i]);
590 err_dstbuf:
591         kfree(thread->dsts);
592 err_dsts:
593         for (i = 0; thread->srcs[i]; i++)
594                 kfree(thread->srcs[i]);
595 err_srcbuf:
596         kfree(thread->srcs);
597 err_srcs:
598         kfree(pq_coefs);
599 err_thread_type:
600         pr_info("%s: terminating after %u tests, %u failures (status %d)\n",
601                 current->comm, total_tests, failed_tests, ret);
602
603         /* terminate all transfers on specified channels */
604         if (ret)
605                 dmaengine_terminate_all(chan);
606
607         thread->done = true;
608
609         if (params->iterations > 0)
610                 while (!kthread_should_stop()) {
611                         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
612                         interruptible_sleep_on(&wait_dmatest_exit);
613                 }
614
615         return ret;
616 }
617
618 static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
619 {
620         struct dmatest_thread   *thread;
621         struct dmatest_thread   *_thread;
622         int                     ret;
623
624         list_for_each_entry_safe(thread, _thread, &dtc->threads, node) {
625                 ret = kthread_stop(thread->task);
626                 pr_debug("dmatest: thread %s exited with status %d\n",
627                                 thread->task->comm, ret);
628                 list_del(&thread->node);
629                 kfree(thread);
630         }
631
632         /* terminate all transfers on specified channels */
633         dmaengine_terminate_all(dtc->chan);
634
635         kfree(dtc);
636 }
637
638 static int dmatest_add_threads(struct dmatest_info *info,
639                 struct dmatest_chan *dtc, enum dma_transaction_type type)
640 {
641         struct dmatest_params *params = &info->params;
642         struct dmatest_thread *thread;
643         struct dma_chan *chan = dtc->chan;
644         char *op;
645         unsigned int i;
646
647         if (type == DMA_MEMCPY)
648                 op = "copy";
649         else if (type == DMA_XOR)
650                 op = "xor";
651         else if (type == DMA_PQ)
652                 op = "pq";
653         else
654                 return -EINVAL;
655
656         for (i = 0; i < params->threads_per_chan; i++) {
657                 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
658                 if (!thread) {
659                         pr_warning("dmatest: No memory for %s-%s%u\n",
660                                    dma_chan_name(chan), op, i);
661
662                         break;
663                 }
664                 thread->info = info;
665                 thread->chan = dtc->chan;
666                 thread->type = type;
667                 smp_wmb();
668                 thread->task = kthread_run(dmatest_func, thread, "%s-%s%u",
669                                 dma_chan_name(chan), op, i);
670                 if (IS_ERR(thread->task)) {
671                         pr_warning("dmatest: Failed to run thread %s-%s%u\n",
672                                         dma_chan_name(chan), op, i);
673                         kfree(thread);
674                         break;
675                 }
676
677                 /* srcbuf and dstbuf are allocated by the thread itself */
678
679                 list_add_tail(&thread->node, &dtc->threads);
680         }
681
682         return i;
683 }
684
685 static int dmatest_add_channel(struct dmatest_info *info,
686                 struct dma_chan *chan)
687 {
688         struct dmatest_chan     *dtc;
689         struct dma_device       *dma_dev = chan->device;
690         unsigned int            thread_count = 0;
691         int cnt;
692
693         dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
694         if (!dtc) {
695                 pr_warning("dmatest: No memory for %s\n", dma_chan_name(chan));
696                 return -ENOMEM;
697         }
698
699         dtc->chan = chan;
700         INIT_LIST_HEAD(&dtc->threads);
701
702         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
703                 cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
704                 thread_count += cnt > 0 ? cnt : 0;
705         }
706         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
707                 cnt = dmatest_add_threads(info, dtc, DMA_XOR);
708                 thread_count += cnt > 0 ? cnt : 0;
709         }
710         if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
711                 cnt = dmatest_add_threads(info, dtc, DMA_PQ);
712                 thread_count += cnt > 0 ? cnt : 0;
713         }
714
715         pr_info("dmatest: Started %u threads using %s\n",
716                 thread_count, dma_chan_name(chan));
717
718         list_add_tail(&dtc->node, &info->channels);
719         info->nr_channels++;
720
721         return 0;
722 }
723
724 static bool filter(struct dma_chan *chan, void *param)
725 {
726         struct dmatest_params *params = param;
727
728         if (!dmatest_match_channel(params, chan) ||
729             !dmatest_match_device(params, chan->device))
730                 return false;
731         else
732                 return true;
733 }
734
735 static int __run_threaded_test(struct dmatest_info *info)
736 {
737         dma_cap_mask_t mask;
738         struct dma_chan *chan;
739         struct dmatest_params *params = &info->params;
740         int err = 0;
741
742         dma_cap_zero(mask);
743         dma_cap_set(DMA_MEMCPY, mask);
744         for (;;) {
745                 chan = dma_request_channel(mask, filter, params);
746                 if (chan) {
747                         err = dmatest_add_channel(info, chan);
748                         if (err) {
749                                 dma_release_channel(chan);
750                                 break; /* add_channel failed, punt */
751                         }
752                 } else
753                         break; /* no more channels available */
754                 if (params->max_channels &&
755                     info->nr_channels >= params->max_channels)
756                         break; /* we have all we need */
757         }
758         return err;
759 }
760
761 #ifndef MODULE
/* Locked wrapper around __run_threaded_test() (built-in init path only). */
static int run_threaded_test(struct dmatest_info *info)
{
	int ret;

	mutex_lock(&info->lock);
	ret = __run_threaded_test(info);
	mutex_unlock(&info->lock);
	return ret;
}
771 #endif
772
773 static void __stop_threaded_test(struct dmatest_info *info)
774 {
775         struct dmatest_chan *dtc, *_dtc;
776         struct dma_chan *chan;
777
778         list_for_each_entry_safe(dtc, _dtc, &info->channels, node) {
779                 list_del(&dtc->node);
780                 chan = dtc->chan;
781                 dmatest_cleanup_channel(dtc);
782                 pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan));
783                 dma_release_channel(chan);
784         }
785
786         info->nr_channels = 0;
787 }
788
/* Locked variant of __stop_threaded_test() for external callers. */
static void stop_threaded_test(struct dmatest_info *info)
{
	mutex_lock(&info->lock);
	__stop_threaded_test(info);
	mutex_unlock(&info->lock);
}
795
796 static int __restart_threaded_test(struct dmatest_info *info, bool run)
797 {
798         struct dmatest_params *params = &info->params;
799
800         /* Stop any running test first */
801         __stop_threaded_test(info);
802
803         if (run == false)
804                 return 0;
805
806         /* Copy test parameters */
807         params->buf_size = test_buf_size;
808         strlcpy(params->channel, strim(test_channel), sizeof(params->channel));
809         strlcpy(params->device, strim(test_device), sizeof(params->device));
810         params->threads_per_chan = threads_per_chan;
811         params->max_channels = max_channels;
812         params->iterations = iterations;
813         params->xor_sources = xor_sources;
814         params->pq_sources = pq_sources;
815         params->timeout = timeout;
816
817         /* Run test with new parameters */
818         return __run_threaded_test(info);
819 }
820
821 static bool __is_threaded_test_run(struct dmatest_info *info)
822 {
823         struct dmatest_chan *dtc;
824
825         list_for_each_entry(dtc, &info->channels, node) {
826                 struct dmatest_thread *thread;
827
828                 list_for_each_entry(thread, &dtc->threads, node) {
829                         if (!thread->done)
830                                 return true;
831                 }
832         }
833
834         return false;
835 }
836
837 static ssize_t dtf_read_run(struct file *file, char __user *user_buf,
838                 size_t count, loff_t *ppos)
839 {
840         struct dmatest_info *info = file->private_data;
841         char buf[3];
842
843         mutex_lock(&info->lock);
844
845         if (__is_threaded_test_run(info)) {
846                 buf[0] = 'Y';
847         } else {
848                 __stop_threaded_test(info);
849                 buf[0] = 'N';
850         }
851
852         mutex_unlock(&info->lock);
853         buf[1] = '\n';
854         buf[2] = 0x00;
855         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
856 }
857
858 static ssize_t dtf_write_run(struct file *file, const char __user *user_buf,
859                 size_t count, loff_t *ppos)
860 {
861         struct dmatest_info *info = file->private_data;
862         char buf[16];
863         bool bv;
864         int ret = 0;
865
866         if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1))))
867                 return -EFAULT;
868
869         if (strtobool(buf, &bv) == 0) {
870                 mutex_lock(&info->lock);
871
872                 if (__is_threaded_test_run(info))
873                         ret = -EBUSY;
874                 else
875                         ret = __restart_threaded_test(info, bv);
876
877                 mutex_unlock(&info->lock);
878         }
879
880         return ret ? ret : count;
881 }
882
/* File operations for the debugfs "run" control node. */
static const struct file_operations dtf_run_fops = {
	.read	= dtf_read_run,
	.write	= dtf_write_run,
	.open	= simple_open,
	.llseek	= default_llseek,
};
889
890 static int dmatest_register_dbgfs(struct dmatest_info *info)
891 {
892         struct dentry *d;
893
894         d = debugfs_create_dir("dmatest", NULL);
895         if (IS_ERR(d))
896                 return PTR_ERR(d);
897         if (!d)
898                 goto err_root;
899
900         info->root = d;
901
902         /* Run or stop threaded test */
903         debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, info,
904                             &dtf_run_fops);
905
906         return 0;
907
908 err_root:
909         pr_err("dmatest: Failed to initialize debugfs\n");
910         return -ENOMEM;
911 }
912
913 static int __init dmatest_init(void)
914 {
915         struct dmatest_info *info = &test_info;
916         int ret;
917
918         memset(info, 0, sizeof(*info));
919
920         mutex_init(&info->lock);
921         INIT_LIST_HEAD(&info->channels);
922
923         ret = dmatest_register_dbgfs(info);
924         if (ret)
925                 return ret;
926
927 #ifdef MODULE
928         return 0;
929 #else
930         return run_threaded_test(info);
931 #endif
932 }
933 /* when compiled-in wait for drivers to load first */
934 late_initcall(dmatest_init);
935
static void __exit dmatest_exit(void)
{
	struct dmatest_info *info = &test_info;

	/* Remove debugfs first so no new run can be triggered mid-teardown */
	debugfs_remove_recursive(info->root);
	stop_threaded_test(info);
}
module_exit(dmatest_exit);
944
945 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
946 MODULE_LICENSE("GPL v2");