drivers/dma/mv_xor.c
1 /*
2  * offload engine driver for the Marvell XOR engine
3  * Copyright (C) 2007, 2008, Marvell International Ltd.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  */
18
19 #include <linux/init.h>
20 #include <linux/module.h>
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/spinlock.h>
25 #include <linux/interrupt.h>
26 #include <linux/platform_device.h>
27 #include <linux/memory.h>
28 #include <linux/clk.h>
29 #include <linux/of.h>
30 #include <linux/of_irq.h>
31 #include <linux/irqdomain.h>
32 #include <linux/platform_data/dma-mv_xor.h>
33
34 #include "dmaengine.h"
35 #include "mv_xor.h"
36
37 static void mv_xor_issue_pending(struct dma_chan *chan);
38
39 #define to_mv_xor_chan(chan)            \
40         container_of(chan, struct mv_xor_chan, dmachan)
41
42 #define to_mv_xor_slot(tx)              \
43         container_of(tx, struct mv_xor_desc_slot, async_tx)
44
45 #define mv_chan_to_devp(chan)           \
46         ((chan)->dmadev.dev)
47
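/*
 * Initialize a hardware descriptor: per the XOR engine descriptor layout,
 * bit 31 of the status word marks the descriptor as owned by the DMA engine
 * and bit 31 of the command word enables the end-of-descriptor interrupt;
 * the next-descriptor pointer is cleared until the slot is chained.
 */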
48 static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
49 {
50         struct mv_xor_desc *hw_desc = desc->hw_desc;
51
52         hw_desc->status = (1 << 31);
53         hw_desc->phy_next_desc = 0;
54         hw_desc->desc_command = (1 << 31);
55 }
56
57 static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
58 {
59         struct mv_xor_desc *hw_desc = desc->hw_desc;
60         return hw_desc->phy_dest_addr;
61 }
62
63 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
64                                    u32 byte_count)
65 {
66         struct mv_xor_desc *hw_desc = desc->hw_desc;
67         hw_desc->byte_count = byte_count;
68 }
69
70 static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
71                                   u32 next_desc_addr)
72 {
73         struct mv_xor_desc *hw_desc = desc->hw_desc;
74         BUG_ON(hw_desc->phy_next_desc);
75         hw_desc->phy_next_desc = next_desc_addr;
76 }
77
78 static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
79 {
80         struct mv_xor_desc *hw_desc = desc->hw_desc;
81         hw_desc->phy_next_desc = 0;
82 }
83
84 static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
85                                   dma_addr_t addr)
86 {
87         struct mv_xor_desc *hw_desc = desc->hw_desc;
88         hw_desc->phy_dest_addr = addr;
89 }
90
91 static int mv_chan_memset_slot_count(size_t len)
92 {
93         return 1;
94 }
95
96 #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
97
98 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
99                                  int index, dma_addr_t addr)
100 {
101         struct mv_xor_desc *hw_desc = desc->hw_desc;
102         hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
103         if (desc->type == DMA_XOR)
104                 hw_desc->desc_command |= (1 << index);
105 }
106
107 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
108 {
109         return readl_relaxed(XOR_CURR_DESC(chan));
110 }
111
112 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
113                                         u32 next_desc_addr)
114 {
115         writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
116 }
117
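/*
 * The XOR unit shares a single interrupt cause/mask register pair between
 * its channels; each channel owns a 16-bit slice selected by chan->idx.
 */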
118 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
119 {
120         u32 val = readl_relaxed(XOR_INTR_MASK(chan));
121         val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
122         writel_relaxed(val, XOR_INTR_MASK(chan));
123 }
124
125 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
126 {
127         u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
128         intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
129         return intr_cause;
130 }
131
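/*
 * Bits 4-9 of a channel's interrupt cause slice flag error conditions; the
 * low bits report normal end-of-descriptor/end-of-chain events.
 */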
132 static int mv_is_err_intr(u32 intr_cause)
133 {
134         if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
135                 return 1;
136
137         return 0;
138 }
139
140 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
141 {
142         u32 val = ~(1 << (chan->idx * 16));
143         dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
144         writel_relaxed(val, XOR_INTR_CAUSE(chan));
145 }
146
147 static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
148 {
149         u32 val = 0xFFFF0000 >> (chan->idx * 16);
150         writel_relaxed(val, XOR_INTR_CAUSE(chan));
151 }
152
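/*
 * A descriptor may only be appended to the existing hardware chain if it
 * uses the same operation type as the current chain tail, since switching
 * between XOR and MEMCPY requires reprogramming the channel configuration.
 */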
153 static int mv_can_chain(struct mv_xor_desc_slot *desc)
154 {
155         struct mv_xor_desc_slot *chain_old_tail = list_entry(
156                 desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
157
158         if (chain_old_tail->type != desc->type)
159                 return 0;
160
161         return 1;
162 }
163
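/*
 * Select the channel's operation mode (XOR or MEMCPY) in the low bits of
 * the configuration register and set descriptor byte swapping to match the
 * CPU endianness.
 */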
164 static void mv_set_mode(struct mv_xor_chan *chan,
165                                enum dma_transaction_type type)
166 {
167         u32 op_mode;
168         u32 config = readl_relaxed(XOR_CONFIG(chan));
169
170         switch (type) {
171         case DMA_XOR:
172                 op_mode = XOR_OPERATION_MODE_XOR;
173                 break;
174         case DMA_MEMCPY:
175                 op_mode = XOR_OPERATION_MODE_MEMCPY;
176                 break;
177         default:
178                 dev_err(mv_chan_to_devp(chan),
179                         "error: unsupported operation %d\n",
180                         type);
181                 BUG();
182                 return;
183         }
184
185         config &= ~0x7;
186         config |= op_mode;
187
188 #if defined(__BIG_ENDIAN)
189         config |= XOR_DESCRIPTOR_SWAP;
190 #else
191         config &= ~XOR_DESCRIPTOR_SWAP;
192 #endif
193
194         writel_relaxed(config, XOR_CONFIG(chan));
195         chan->current_type = type;
196 }
197
198 static void mv_chan_activate(struct mv_xor_chan *chan)
199 {
200         u32 activation;
201
202         dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
203         activation = readl_relaxed(XOR_ACTIVATION(chan));
204         activation |= 0x1;
205         writel_relaxed(activation, XOR_ACTIVATION(chan));
206 }
207
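/*
 * The channel status is encoded in bits 5:4 of the activation register; a
 * value of 1 means the channel is actively processing descriptors.
 */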
208 static char mv_chan_is_busy(struct mv_xor_chan *chan)
209 {
210         u32 state = readl_relaxed(XOR_ACTIVATION(chan));
211
212         state = (state >> 4) & 0x3;
213
214         return (state == 1) ? 1 : 0;
215 }
216
217 static int mv_chan_xor_slot_count(size_t len, int src_cnt)
218 {
219         return 1;
220 }
221
222 /**
223  * mv_xor_free_slots - mark a descriptor slot as available for reuse
224  * @mv_chan: XOR channel owning the slot; @slot: slot to free
225  * Caller must hold &mv_chan->lock while calling this function.
226  */
227 static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
228                               struct mv_xor_desc_slot *slot)
229 {
230         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
231                 __func__, __LINE__, slot);
232
233         slot->slots_per_op = 0;
234
235 }
236
237 /*
238  * mv_xor_start_new_chain - program the engine to operate on a new chain
239  * headed by sw_desc
240  * Caller must hold &mv_chan->lock while calling this function.
241  */
242 static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
243                                    struct mv_xor_desc_slot *sw_desc)
244 {
245         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
246                 __func__, __LINE__, sw_desc);
247         if (sw_desc->type != mv_chan->current_type)
248                 mv_set_mode(mv_chan, sw_desc->type);
249
250         /* set the hardware chain */
251         mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
252
253         mv_chan->pending += sw_desc->slot_cnt;
254         mv_xor_issue_pending(&mv_chan->dmachan);
255 }
256
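/*
 * Run the dmaengine completion actions for a finished descriptor: invoke
 * the client callback, unmap the transfer buffers and kick any dependent
 * operations. Returns the descriptor's cookie so the caller can update the
 * channel's completed_cookie.
 */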
257 static dma_cookie_t
258 mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
259         struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
260 {
261         BUG_ON(desc->async_tx.cookie < 0);
262
263         if (desc->async_tx.cookie > 0) {
264                 cookie = desc->async_tx.cookie;
265
266                 /* call the callback (must not sleep or submit new
267                  * operations to this channel)
268                  */
269                 if (desc->async_tx.callback)
270                         desc->async_tx.callback(
271                                 desc->async_tx.callback_param);
272
273                 dma_descriptor_unmap(&desc->async_tx);
274                 if (desc->group_head)
275                         desc->group_head = NULL;
276         }
277
278         /* run dependent operations */
279         dma_run_dependencies(&desc->async_tx);
280
281         return cookie;
282 }
283
284 static int
285 mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
286 {
287         struct mv_xor_desc_slot *iter, *_iter;
288
289         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
290         list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
291                                  completed_node) {
292
293                 if (async_tx_test_ack(&iter->async_tx)) {
294                         list_del(&iter->completed_node);
295                         mv_xor_free_slots(mv_chan, iter);
296                 }
297         }
298         return 0;
299 }
300
301 static int
302 mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
303         struct mv_xor_chan *mv_chan)
304 {
305         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
306                 __func__, __LINE__, desc, desc->async_tx.flags);
307         list_del(&desc->chain_node);
308         /* the client is allowed to attach dependent operations
309          * until 'ack' is set
310          */
311         if (!async_tx_test_ack(&desc->async_tx)) {
312                 /* move this slot to the completed_slots */
313                 list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
314                 return 0;
315         }
316
317         mv_xor_free_slots(mv_chan, desc);
318         return 0;
319 }
320
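/*
 * Walk the channel's descriptor chain and retire every descriptor the
 * hardware has finished with, stopping at the descriptor currently loaded
 * into the engine. If the channel went idle while work is still queued,
 * restart it on the head of the remaining chain.
 * Caller must hold &mv_chan->lock; mv_xor_slot_cleanup() is the locked
 * wrapper.
 */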
321 static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
322 {
323         struct mv_xor_desc_slot *iter, *_iter;
324         dma_cookie_t cookie = 0;
325         int busy = mv_chan_is_busy(mv_chan);
326         u32 current_desc = mv_chan_get_current_desc(mv_chan);
327         int seen_current = 0;
328
329         dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
330         dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
331         mv_xor_clean_completed_slots(mv_chan);
332
333         /* free completed slots from the chain starting with
334          * the oldest descriptor
335          */
336
337         list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
338                                         chain_node) {
339                 prefetch(_iter);
340                 prefetch(&_iter->async_tx);
341
342                 /* do not advance past the current descriptor loaded into the
343                  * hardware channel; subsequent descriptors are either in
344                  * flight or have not been submitted
345                  */
346                 if (seen_current)
347                         break;
348
349                 /* stop the search if we reach the current descriptor and the
350                  * channel is busy
351                  */
352                 if (iter->async_tx.phys == current_desc) {
353                         seen_current = 1;
354                         if (busy)
355                                 break;
356                 }
357
358                 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
359
360                 if (mv_xor_clean_slot(iter, mv_chan))
361                         break;
362         }
363
364         if ((busy == 0) && !list_empty(&mv_chan->chain)) {
365                 struct mv_xor_desc_slot *chain_head;
366                 chain_head = list_entry(mv_chan->chain.next,
367                                         struct mv_xor_desc_slot,
368                                         chain_node);
369
370                 mv_xor_start_new_chain(mv_chan, chain_head);
371         }
372
373         if (cookie > 0)
374                 mv_chan->dmachan.completed_cookie = cookie;
375 }
376
377 static void
378 mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
379 {
380         spin_lock_bh(&mv_chan->lock);
381         __mv_xor_slot_cleanup(mv_chan);
382         spin_unlock_bh(&mv_chan->lock);
383 }
384
385 static void mv_xor_tasklet(unsigned long data)
386 {
387         struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
388         mv_xor_slot_cleanup(chan);
389 }
390
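/*
 * Grab num_slots contiguous free descriptor slots from the channel's slot
 * list and string them together into a software chain. Returns the tail of
 * the allocation (which carries group_head and tx_list) or NULL if no
 * contiguous run is free, in which case the cleanup tasklet is scheduled to
 * reclaim completed slots.
 * Caller must hold &mv_chan->lock.
 */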
391 static struct mv_xor_desc_slot *
392 mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
393                     int slots_per_op)
394 {
395         struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
396         LIST_HEAD(chain);
397         int slots_found, retry = 0;
398
399         /* start the search from the last allocated descriptor;
400          * if a contiguous allocation cannot be found, restart the
401          * search from the beginning of the list
402          */
403 retry:
404         slots_found = 0;
405         if (retry == 0)
406                 iter = mv_chan->last_used;
407         else
408                 iter = list_entry(&mv_chan->all_slots,
409                         struct mv_xor_desc_slot,
410                         slot_node);
411
412         list_for_each_entry_safe_continue(
413                 iter, _iter, &mv_chan->all_slots, slot_node) {
414                 prefetch(_iter);
415                 prefetch(&_iter->async_tx);
416                 if (iter->slots_per_op) {
417                         /* give up after finding the first busy slot
418                          * on the second pass through the list
419                          */
420                         if (retry)
421                                 break;
422
423                         slots_found = 0;
424                         continue;
425                 }
426
427                 /* start the allocation if the slot is correctly aligned */
428                 if (!slots_found++)
429                         alloc_start = iter;
430
431                 if (slots_found == num_slots) {
432                         struct mv_xor_desc_slot *alloc_tail = NULL;
433                         struct mv_xor_desc_slot *last_used = NULL;
434                         iter = alloc_start;
435                         while (num_slots) {
436                                 int i;
437
438                                 /* pre-ack all but the last descriptor */
439                                 async_tx_ack(&iter->async_tx);
440
441                                 list_add_tail(&iter->chain_node, &chain);
442                                 alloc_tail = iter;
443                                 iter->async_tx.cookie = 0;
444                                 iter->slot_cnt = num_slots;
445                                 iter->xor_check_result = NULL;
446                                 for (i = 0; i < slots_per_op; i++) {
447                                         iter->slots_per_op = slots_per_op - i;
448                                         last_used = iter;
449                                         iter = list_entry(iter->slot_node.next,
450                                                 struct mv_xor_desc_slot,
451                                                 slot_node);
452                                 }
453                                 num_slots -= slots_per_op;
454                         }
455                         alloc_tail->group_head = alloc_start;
456                         alloc_tail->async_tx.cookie = -EBUSY;
457                         list_splice(&chain, &alloc_tail->tx_list);
458                         mv_chan->last_used = last_used;
459                         mv_desc_clear_next_desc(alloc_start);
460                         mv_desc_clear_next_desc(alloc_tail);
461                         return alloc_tail;
462                 }
463         }
464         if (!retry++)
465                 goto retry;
466
467         /* try to free some slots if the allocation fails */
468         tasklet_schedule(&mv_chan->irq_tasklet);
469
470         return NULL;
471 }
472
473 /************************ DMA engine API functions ****************************/
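/*
 * tx_submit hook: assign a cookie and splice the descriptor's tx_list onto
 * the channel's software chain. If the hardware chain is empty, or the
 * engine already ran off its end, the engine is (re)started on the new
 * descriptors; otherwise they are linked after the current chain tail.
 * See mv_xor_memcpy_self_test() below for the full prep/submit/issue/poll
 * sequence as seen from a client.
 */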
474 static dma_cookie_t
475 mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
476 {
477         struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
478         struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
479         struct mv_xor_desc_slot *grp_start, *old_chain_tail;
480         dma_cookie_t cookie;
481         int new_hw_chain = 1;
482
483         dev_dbg(mv_chan_to_devp(mv_chan),
484                 "%s sw_desc %p: async_tx %p\n",
485                 __func__, sw_desc, &sw_desc->async_tx);
486
487         grp_start = sw_desc->group_head;
488
489         spin_lock_bh(&mv_chan->lock);
490         cookie = dma_cookie_assign(tx);
491
492         if (list_empty(&mv_chan->chain))
493                 list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
494         else {
495                 new_hw_chain = 0;
496
497                 old_chain_tail = list_entry(mv_chan->chain.prev,
498                                             struct mv_xor_desc_slot,
499                                             chain_node);
500                 list_splice_init(&grp_start->tx_list,
501                                  &old_chain_tail->chain_node);
502
503                 if (!mv_can_chain(grp_start))
504                         goto submit_done;
505
506                 dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
507                         old_chain_tail->async_tx.phys);
508
509                 /* fix up the hardware chain */
510                 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
511
512                 /* if the channel is not busy */
513                 if (!mv_chan_is_busy(mv_chan)) {
514                         u32 current_desc = mv_chan_get_current_desc(mv_chan);
515                         /*
516                          * and the current descriptor is the end of the chain
517                          * before the append, then we need to start the channel
518                          */
519                         if (current_desc == old_chain_tail->async_tx.phys)
520                                 new_hw_chain = 1;
521                 }
522         }
523
524         if (new_hw_chain)
525                 mv_xor_start_new_chain(mv_chan, grp_start);
526
527 submit_done:
528         spin_unlock_bh(&mv_chan->lock);
529
530         return cookie;
531 }
532
533 /* returns the number of allocated descriptors */
534 static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
535 {
536         char *hw_desc;
537         int idx;
538         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
539         struct mv_xor_desc_slot *slot = NULL;
540         int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
541
542         /* Allocate descriptor slots */
543         idx = mv_chan->slots_allocated;
544         while (idx < num_descs_in_pool) {
545                 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
546                 if (!slot) {
547                         printk(KERN_INFO "MV XOR Channel only initialized"
548                                 " %d descriptor slots\n", idx);
549                         break;
550                 }
551                 hw_desc = (char *) mv_chan->dma_desc_pool_virt;
552                 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
553
554                 dma_async_tx_descriptor_init(&slot->async_tx, chan);
555                 slot->async_tx.tx_submit = mv_xor_tx_submit;
556                 INIT_LIST_HEAD(&slot->chain_node);
557                 INIT_LIST_HEAD(&slot->slot_node);
558                 INIT_LIST_HEAD(&slot->tx_list);
559                 hw_desc = (char *) mv_chan->dma_desc_pool;
560                 slot->async_tx.phys =
561                         (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
562                 slot->idx = idx++;
563
564                 spin_lock_bh(&mv_chan->lock);
565                 mv_chan->slots_allocated = idx;
566                 list_add_tail(&slot->slot_node, &mv_chan->all_slots);
567                 spin_unlock_bh(&mv_chan->lock);
568         }
569
570         if (mv_chan->slots_allocated && !mv_chan->last_used)
571                 mv_chan->last_used = list_entry(mv_chan->all_slots.next,
572                                         struct mv_xor_desc_slot,
573                                         slot_node);
574
575         dev_dbg(mv_chan_to_devp(mv_chan),
576                 "allocated %d descriptor slots last_used: %p\n",
577                 mv_chan->slots_allocated, mv_chan->last_used);
578
579         return mv_chan->slots_allocated ? : -ENOMEM;
580 }
581
582 static struct dma_async_tx_descriptor *
583 mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
584                 size_t len, unsigned long flags)
585 {
586         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
587         struct mv_xor_desc_slot *sw_desc, *grp_start;
588         int slot_cnt;
589
590         dev_dbg(mv_chan_to_devp(mv_chan),
591                 "%s dest: %x src %x len: %u flags: %ld\n",
592                 __func__, dest, src, len, flags);
593         if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
594                 return NULL;
595
596         BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
597
598         spin_lock_bh(&mv_chan->lock);
599         slot_cnt = mv_chan_memcpy_slot_count(len);
600         sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
601         if (sw_desc) {
602                 sw_desc->type = DMA_MEMCPY;
603                 sw_desc->async_tx.flags = flags;
604                 grp_start = sw_desc->group_head;
605                 mv_desc_init(grp_start, flags);
606                 mv_desc_set_byte_count(grp_start, len);
607                 mv_desc_set_dest_addr(sw_desc->group_head, dest);
608                 mv_desc_set_src_addr(grp_start, 0, src);
609                 sw_desc->unmap_src_cnt = 1;
610                 sw_desc->unmap_len = len;
611         }
612         spin_unlock_bh(&mv_chan->lock);
613
614         dev_dbg(mv_chan_to_devp(mv_chan),
615                 "%s sw_desc %p async_tx %p\n",
616                 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
617
618         return sw_desc ? &sw_desc->async_tx : NULL;
619 }
620
621 static struct dma_async_tx_descriptor *
622 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
623                     unsigned int src_cnt, size_t len, unsigned long flags)
624 {
625         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
626         struct mv_xor_desc_slot *sw_desc, *grp_start;
627         int slot_cnt;
628
629         if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
630                 return NULL;
631
632         BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
633
634         dev_dbg(mv_chan_to_devp(mv_chan),
635                 "%s src_cnt: %d len: %zu dest %x flags: %ld\n",
636                 __func__, src_cnt, len, dest, flags);
637
638         spin_lock_bh(&mv_chan->lock);
639         slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
640         sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
641         if (sw_desc) {
642                 sw_desc->type = DMA_XOR;
643                 sw_desc->async_tx.flags = flags;
644                 grp_start = sw_desc->group_head;
645                 mv_desc_init(grp_start, flags);
646                 /* the byte count field is the same as in the memcpy descriptor */
647                 mv_desc_set_byte_count(grp_start, len);
648                 mv_desc_set_dest_addr(sw_desc->group_head, dest);
649                 sw_desc->unmap_src_cnt = src_cnt;
650                 sw_desc->unmap_len = len;
651                 while (src_cnt--)
652                         mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
653         }
654         spin_unlock_bh(&mv_chan->lock);
655         dev_dbg(mv_chan_to_devp(mv_chan),
656                 "%s sw_desc %p async_tx %p\n",
657                 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
658         return sw_desc ? &sw_desc->async_tx : NULL;
659 }
660
661 static void mv_xor_free_chan_resources(struct dma_chan *chan)
662 {
663         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
664         struct mv_xor_desc_slot *iter, *_iter;
665         int in_use_descs = 0;
666
667         mv_xor_slot_cleanup(mv_chan);
668
669         spin_lock_bh(&mv_chan->lock);
670         list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
671                                         chain_node) {
672                 in_use_descs++;
673                 list_del(&iter->chain_node);
674         }
675         list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
676                                  completed_node) {
677                 in_use_descs++;
678                 list_del(&iter->completed_node);
679         }
680         list_for_each_entry_safe_reverse(
681                 iter, _iter, &mv_chan->all_slots, slot_node) {
682                 list_del(&iter->slot_node);
683                 kfree(iter);
684                 mv_chan->slots_allocated--;
685         }
686         mv_chan->last_used = NULL;
687
688         dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
689                 __func__, mv_chan->slots_allocated);
690         spin_unlock_bh(&mv_chan->lock);
691
692         if (in_use_descs)
693                 dev_err(mv_chan_to_devp(mv_chan),
694                         "freeing %d in use descriptors!\n", in_use_descs);
695 }
696
697 /**
698  * mv_xor_status - poll the status of an XOR transaction
699  * @chan: XOR channel handle
700  * @cookie: XOR transaction identifier
701  * @txstate: XOR transactions state holder (or NULL)
702  */
703 static enum dma_status mv_xor_status(struct dma_chan *chan,
704                                           dma_cookie_t cookie,
705                                           struct dma_tx_state *txstate)
706 {
707         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
708         enum dma_status ret;
709
710         ret = dma_cookie_status(chan, cookie, txstate);
711         if (ret == DMA_COMPLETE) {
712                 mv_xor_clean_completed_slots(mv_chan);
713                 return ret;
714         }
715         mv_xor_slot_cleanup(mv_chan);
716
717         return dma_cookie_status(chan, cookie, txstate);
718 }
719
720 static void mv_dump_xor_regs(struct mv_xor_chan *chan)
721 {
722         u32 val;
723
724         val = readl_relaxed(XOR_CONFIG(chan));
725         dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
726
727         val = readl_relaxed(XOR_ACTIVATION(chan));
728         dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
729
730         val = readl_relaxed(XOR_INTR_CAUSE(chan));
731         dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
732
733         val = readl_relaxed(XOR_INTR_MASK(chan));
734         dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
735
736         val = readl_relaxed(XOR_ERROR_CAUSE(chan));
737         dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
738
739         val = readl_relaxed(XOR_ERROR_ADDR(chan));
740         dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
741 }
742
743 static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
744                                          u32 intr_cause)
745 {
746         if (intr_cause & (1 << 4)) {
747              dev_dbg(mv_chan_to_devp(chan),
748                      "ignore this error\n");
749              return;
750         }
751
752         dev_err(mv_chan_to_devp(chan),
753                 "error on chan %d. intr cause 0x%08x\n",
754                 chan->idx, intr_cause);
755
756         mv_dump_xor_regs(chan);
757         BUG();
758 }
759
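/*
 * Per-channel interrupt handler: report error causes, defer descriptor
 * cleanup to the tasklet and acknowledge the end-of-descriptor/end-of-chain
 * cause bits.
 */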
760 static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
761 {
762         struct mv_xor_chan *chan = data;
763         u32 intr_cause = mv_chan_get_intr_cause(chan);
764
765         dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
766
767         if (mv_is_err_intr(intr_cause))
768                 mv_xor_err_interrupt_handler(chan, intr_cause);
769
770         tasklet_schedule(&chan->irq_tasklet);
771
772         mv_xor_device_clear_eoc_cause(chan);
773
774         return IRQ_HANDLED;
775 }
776
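/*
 * issue_pending hook: (re)activate the channel once at least
 * MV_XOR_THRESHOLD descriptors have been queued since the last activation.
 */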
777 static void mv_xor_issue_pending(struct dma_chan *chan)
778 {
779         struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
780
781         if (mv_chan->pending >= MV_XOR_THRESHOLD) {
782                 mv_chan->pending = 0;
783                 mv_chan_activate(mv_chan);
784         }
785 }
786
787 /*
788  * Perform a transaction to verify the HW works.
789  */
790 #define MV_XOR_TEST_SIZE 2000
791
792 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
793 {
794         int i;
795         void *src, *dest;
796         dma_addr_t src_dma, dest_dma;
797         struct dma_chan *dma_chan;
798         dma_cookie_t cookie;
799         struct dma_async_tx_descriptor *tx;
800         int err = 0;
801
802         src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
803         if (!src)
804                 return -ENOMEM;
805
806         dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
807         if (!dest) {
808                 kfree(src);
809                 return -ENOMEM;
810         }
811
812         /* Fill in src buffer */
813         for (i = 0; i < MV_XOR_TEST_SIZE; i++)
814                 ((u8 *) src)[i] = (u8)i;
815
816         dma_chan = &mv_chan->dmachan;
817         if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
818                 err = -ENODEV;
819                 goto out;
820         }
821
822         dest_dma = dma_map_single(dma_chan->device->dev, dest,
823                                   MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
824
825         src_dma = dma_map_single(dma_chan->device->dev, src,
826                                  MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
827
828         tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
829                                     MV_XOR_TEST_SIZE, 0);
830         cookie = mv_xor_tx_submit(tx);
831         mv_xor_issue_pending(dma_chan);
832         async_tx_ack(tx);
833         msleep(1);
834
835         if (mv_xor_status(dma_chan, cookie, NULL) !=
836             DMA_COMPLETE) {
837                 dev_err(dma_chan->device->dev,
838                         "Self-test copy timed out, disabling\n");
839                 err = -ENODEV;
840                 goto free_resources;
841         }
842
843         dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
844                                 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
845         if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
846                 dev_err(dma_chan->device->dev,
847                         "Self-test copy failed compare, disabling\n");
848                 err = -ENODEV;
849                 goto free_resources;
850         }
851
852 free_resources:
853         mv_xor_free_chan_resources(dma_chan);
854 out:
855         kfree(src);
856         kfree(dest);
857         return err;
858 }
859
860 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
861 static int
862 mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
863 {
864         int i, src_idx;
865         struct page *dest;
866         struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
867         dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
868         dma_addr_t dest_dma;
869         struct dma_async_tx_descriptor *tx;
870         struct dma_chan *dma_chan;
871         dma_cookie_t cookie;
872         u8 cmp_byte = 0;
873         u32 cmp_word;
874         int err = 0;
875
876         for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
877                 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
878                 if (!xor_srcs[src_idx]) {
879                         while (src_idx--)
880                                 __free_page(xor_srcs[src_idx]);
881                         return -ENOMEM;
882                 }
883         }
884
885         dest = alloc_page(GFP_KERNEL);
886         if (!dest) {
887                 while (src_idx--)
888                         __free_page(xor_srcs[src_idx]);
889                 return -ENOMEM;
890         }
891
892         /* Fill in src buffers */
893         for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
894                 u8 *ptr = page_address(xor_srcs[src_idx]);
895                 for (i = 0; i < PAGE_SIZE; i++)
896                         ptr[i] = (1 << src_idx);
897         }
898
899         for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
900                 cmp_byte ^= (u8) (1 << src_idx);
901
902         cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
903                 (cmp_byte << 8) | cmp_byte;
904
905         memset(page_address(dest), 0, PAGE_SIZE);
906
907         dma_chan = &mv_chan->dmachan;
908         if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
909                 err = -ENODEV;
910                 goto out;
911         }
912
913         /* test xor */
914         dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
915                                 DMA_FROM_DEVICE);
916
917         for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
918                 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
919                                            0, PAGE_SIZE, DMA_TO_DEVICE);
920
921         tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
922                                  MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
923
924         cookie = mv_xor_tx_submit(tx);
925         mv_xor_issue_pending(dma_chan);
926         async_tx_ack(tx);
927         msleep(8);
928
929         if (mv_xor_status(dma_chan, cookie, NULL) !=
930             DMA_COMPLETE) {
931                 dev_err(dma_chan->device->dev,
932                         "Self-test xor timed out, disabling\n");
933                 err = -ENODEV;
934                 goto free_resources;
935         }
936
937         dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
938                                 PAGE_SIZE, DMA_FROM_DEVICE);
939         for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
940                 u32 *ptr = page_address(dest);
941                 if (ptr[i] != cmp_word) {
942                         dev_err(dma_chan->device->dev,
943                                 "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
944                                 i, ptr[i], cmp_word);
945                         err = -ENODEV;
946                         goto free_resources;
947                 }
948         }
949
950 free_resources:
951         mv_xor_free_chan_resources(dma_chan);
952 out:
953         src_idx = MV_XOR_NUM_SRC_TEST;
954         while (src_idx--)
955                 __free_page(xor_srcs[src_idx]);
956         __free_page(dest);
957         return err;
958 }
959
960 /* This driver does not implement any of the optional DMA operations. */
961 static int
962 mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
963                unsigned long arg)
964 {
965         return -ENOSYS;
966 }
967
968 static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
969 {
970         struct dma_chan *chan, *_chan;
971         struct device *dev = mv_chan->dmadev.dev;
972
973         dma_async_device_unregister(&mv_chan->dmadev);
974
975         dma_free_coherent(dev, MV_XOR_POOL_SIZE,
976                           mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
977
978         list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
979                                  device_node) {
980                 list_del(&chan->device_node);
981         }
982
983         free_irq(mv_chan->irq, mv_chan);
984
985         return 0;
986 }
987
988 static struct mv_xor_chan *
989 mv_xor_channel_add(struct mv_xor_device *xordev,
990                    struct platform_device *pdev,
991                    int idx, dma_cap_mask_t cap_mask, int irq)
992 {
993         int ret = 0;
994         struct mv_xor_chan *mv_chan;
995         struct dma_device *dma_dev;
996
997         mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
998         if (!mv_chan)
999                 return ERR_PTR(-ENOMEM);
1000
1001         mv_chan->idx = idx;
1002         mv_chan->irq = irq;
1003
1004         dma_dev = &mv_chan->dmadev;
1005
1006         /* allocate coherent memory for hardware descriptors
1007          * note: writecombine gives slightly better performance, but
1008          * requires that we explicitly flush the writes
1009          */
1010         mv_chan->dma_desc_pool_virt =
1011           dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
1012                                  &mv_chan->dma_desc_pool, GFP_KERNEL);
1013         if (!mv_chan->dma_desc_pool_virt)
1014                 return ERR_PTR(-ENOMEM);
1015
1016         /* discover transaction capabilities from the platform data */
1017         dma_dev->cap_mask = cap_mask;
1018
1019         INIT_LIST_HEAD(&dma_dev->channels);
1020
1021         /* set base routines */
1022         dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
1023         dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
1024         dma_dev->device_tx_status = mv_xor_status;
1025         dma_dev->device_issue_pending = mv_xor_issue_pending;
1026         dma_dev->device_control = mv_xor_control;
1027         dma_dev->dev = &pdev->dev;
1028
1029         /* set prep routines based on capability */
1030         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1031                 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
1032         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1033                 dma_dev->max_xor = 8;
1034                 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
1035         }
1036
1037         mv_chan->mmr_base = xordev->xor_base;
1038         mv_chan->mmr_high_base = xordev->xor_high_base;
1039         tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
1040                      mv_chan);
1041
1042         /* clear errors before enabling interrupts */
1043         mv_xor_device_clear_err_status(mv_chan);
1044
1045         ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
1046                           0, dev_name(&pdev->dev), mv_chan);
1047         if (ret)
1048                 goto err_free_dma;
1049
1050         mv_chan_unmask_interrupts(mv_chan);
1051
1052         mv_set_mode(mv_chan, DMA_MEMCPY);
1053
1054         spin_lock_init(&mv_chan->lock);
1055         INIT_LIST_HEAD(&mv_chan->chain);
1056         INIT_LIST_HEAD(&mv_chan->completed_slots);
1057         INIT_LIST_HEAD(&mv_chan->all_slots);
1058         mv_chan->dmachan.device = dma_dev;
1059         dma_cookie_init(&mv_chan->dmachan);
1060
1061         list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
1062
1063         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1064                 ret = mv_xor_memcpy_self_test(mv_chan);
1065                 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1066                 if (ret)
1067                         goto err_free_irq;
1068         }
1069
1070         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1071                 ret = mv_xor_xor_self_test(mv_chan);
1072                 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1073                 if (ret)
1074                         goto err_free_irq;
1075         }
1076
1077         dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
1078                  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1079                  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1080                  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1081
1082         dma_async_device_register(dma_dev);
1083         return mv_chan;
1084
1085 err_free_irq:
1086         free_irq(mv_chan->irq, mv_chan);
1087 err_free_dma:
1088         dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
1089                           mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
1090         return ERR_PTR(ret);
1091 }
1092
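/*
 * Program the XOR unit's address decoding windows from the SoC's MBus DRAM
 * layout so that descriptors, sources and destinations located in DRAM are
 * reachable by the engine. All eight windows are cleared first, then one
 * window is opened per DRAM chip select and its access protection bits are
 * set to allow accesses.
 */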
1093 static void
1094 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
1095                          const struct mbus_dram_target_info *dram)
1096 {
1097         void __iomem *base = xordev->xor_high_base;
1098         u32 win_enable = 0;
1099         int i;
1100
1101         for (i = 0; i < 8; i++) {
1102                 writel(0, base + WINDOW_BASE(i));
1103                 writel(0, base + WINDOW_SIZE(i));
1104                 if (i < 4)
1105                         writel(0, base + WINDOW_REMAP_HIGH(i));
1106         }
1107
1108         for (i = 0; i < dram->num_cs; i++) {
1109                 const struct mbus_dram_window *cs = dram->cs + i;
1110
1111                 writel((cs->base & 0xffff0000) |
1112                        (cs->mbus_attr << 8) |
1113                        dram->mbus_dram_target_id, base + WINDOW_BASE(i));
1114                 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
1115
1116                 win_enable |= (1 << i);
1117                 win_enable |= 3 << (16 + (2 * i));
1118         }
1119
1120         writel(win_enable, base + WINDOW_BAR_ENABLE(0));
1121         writel(win_enable, base + WINDOW_BAR_ENABLE(1));
1122         writel(0, base + WINDOW_OVERRIDE_CTRL(0));
1123         writel(0, base + WINDOW_OVERRIDE_CTRL(1));
1124 }
1125
1126 static int mv_xor_probe(struct platform_device *pdev)
1127 {
1128         const struct mbus_dram_target_info *dram;
1129         struct mv_xor_device *xordev;
1130         struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
1131         struct resource *res;
1132         int i, ret;
1133
1134         dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
1135
1136         xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
1137         if (!xordev)
1138                 return -ENOMEM;
1139
1140         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1141         if (!res)
1142                 return -ENODEV;
1143
1144         xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
1145                                         resource_size(res));
1146         if (!xordev->xor_base)
1147                 return -EBUSY;
1148
1149         res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1150         if (!res)
1151                 return -ENODEV;
1152
1153         xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
1154                                              resource_size(res));
1155         if (!xordev->xor_high_base)
1156                 return -EBUSY;
1157
1158         platform_set_drvdata(pdev, xordev);
1159
1160         /*
1161          * (Re-)program MBUS remapping windows if we are asked to.
1162          */
1163         dram = mv_mbus_dram_info();
1164         if (dram)
1165                 mv_xor_conf_mbus_windows(xordev, dram);
1166
1167         /* Not all platforms can gate the clock, so it is not
1168          * an error if the clock does not exist.
1169          */
1170         xordev->clk = clk_get(&pdev->dev, NULL);
1171         if (!IS_ERR(xordev->clk))
1172                 clk_prepare_enable(xordev->clk);
1173
1174         if (pdev->dev.of_node) {
1175                 struct device_node *np;
1176                 int i = 0;
1177
1178                 for_each_child_of_node(pdev->dev.of_node, np) {
1179                         dma_cap_mask_t cap_mask;
1180                         int irq;
1181
1182                         dma_cap_zero(cap_mask);
1183                         if (of_property_read_bool(np, "dmacap,memcpy"))
1184                                 dma_cap_set(DMA_MEMCPY, cap_mask);
1185                         if (of_property_read_bool(np, "dmacap,xor"))
1186                                 dma_cap_set(DMA_XOR, cap_mask);
1187                         if (of_property_read_bool(np, "dmacap,interrupt"))
1188                                 dma_cap_set(DMA_INTERRUPT, cap_mask);
1189
1190                         irq = irq_of_parse_and_map(np, 0);
1191                         if (!irq) {
1192                                 ret = -ENODEV;
1193                                 goto err_channel_add;
1194                         }
1195
1196                         xordev->channels[i] =
1197                                 mv_xor_channel_add(xordev, pdev, i,
1198                                                    cap_mask, irq);
1199                         if (IS_ERR(xordev->channels[i])) {
1200                                 ret = PTR_ERR(xordev->channels[i]);
1201                                 xordev->channels[i] = NULL;
1202                                 irq_dispose_mapping(irq);
1203                                 goto err_channel_add;
1204                         }
1205
1206                         i++;
1207                 }
1208         } else if (pdata && pdata->channels) {
1209                 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1210                         struct mv_xor_channel_data *cd;
1211                         int irq;
1212
1213                         cd = &pdata->channels[i];
1214                         if (!cd) {
1215                                 ret = -ENODEV;
1216                                 goto err_channel_add;
1217                         }
1218
1219                         irq = platform_get_irq(pdev, i);
1220                         if (irq < 0) {
1221                                 ret = irq;
1222                                 goto err_channel_add;
1223                         }
1224
1225                         xordev->channels[i] =
1226                                 mv_xor_channel_add(xordev, pdev, i,
1227                                                    cd->cap_mask, irq);
1228                         if (IS_ERR(xordev->channels[i])) {
1229                                 ret = PTR_ERR(xordev->channels[i]);
1230                                 goto err_channel_add;
1231                         }
1232                 }
1233         }
1234
1235         return 0;
1236
1237 err_channel_add:
1238         for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
1239                 if (xordev->channels[i]) {
1240                         mv_xor_channel_remove(xordev->channels[i]);
1241                         if (pdev->dev.of_node)
1242                                 irq_dispose_mapping(xordev->channels[i]->irq);
1243                 }
1244
1245         if (!IS_ERR(xordev->clk)) {
1246                 clk_disable_unprepare(xordev->clk);
1247                 clk_put(xordev->clk);
1248         }
1249
1250         return ret;
1251 }
1252
1253 static int mv_xor_remove(struct platform_device *pdev)
1254 {
1255         struct mv_xor_device *xordev = platform_get_drvdata(pdev);
1256         int i;
1257
1258         for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
1259                 if (xordev->channels[i])
1260                         mv_xor_channel_remove(xordev->channels[i]);
1261         }
1262
1263         if (!IS_ERR(xordev->clk)) {
1264                 clk_disable_unprepare(xordev->clk);
1265                 clk_put(xordev->clk);
1266         }
1267
1268         return 0;
1269 }
1270
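/*
 * Illustrative device tree fragment for the binding handled in
 * mv_xor_probe(): two register windows for the shared engine and one child
 * node per channel, each listing its interrupt and the operations it
 * supports through "dmacap,*" boolean properties. Addresses, interrupt
 * numbers and node names below are placeholders, not taken from a real
 * board file.
 *
 *	xor@60900 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60900 0x100
 *		       0x60b00 0x100>;
 *
 *		channel0 {
 *			interrupts = <51>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *		};
 *
 *		channel1 {
 *			interrupts = <52>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *			dmacap,interrupt;
 *		};
 *	};
 */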
1271 #ifdef CONFIG_OF
1272 static struct of_device_id mv_xor_dt_ids[] = {
1273        { .compatible = "marvell,orion-xor", },
1274        {},
1275 };
1276 MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
1277 #endif
1278
1279 static struct platform_driver mv_xor_driver = {
1280         .probe          = mv_xor_probe,
1281         .remove         = mv_xor_remove,
1282         .driver         = {
1283                 .owner          = THIS_MODULE,
1284                 .name           = MV_XOR_NAME,
1285                 .of_match_table = of_match_ptr(mv_xor_dt_ids),
1286         },
1287 };
1288
1289
1290 static int __init mv_xor_init(void)
1291 {
1292         return platform_driver_register(&mv_xor_driver);
1293 }
1294 module_init(mv_xor_init);
1295
1296 /* it's currently unsafe to unload this module */
1297 #if 0
1298 static void __exit mv_xor_exit(void)
1299 {
1300         platform_driver_unregister(&mv_xor_driver);
1301         return;
1302 }
1303
1304 module_exit(mv_xor_exit);
1305 #endif
1306
1307 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
1308 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
1309 MODULE_LICENSE("GPL");