/*
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#ifndef _ADMA_H
#define _ADMA_H
#include <linux/types.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/hardware/iop_adma.h>

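/*
 * Per-channel register offsets, relative to the channel's mapped
 * register base (chan->mmr_base).  The two DMA channels and the AAU
 * expose different register layouts, hence the separate macro sets.
 */
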
/* Memory copy units */
#define DMA_CCR(chan)           (chan->mmr_base + 0x0)
#define DMA_CSR(chan)           (chan->mmr_base + 0x4)
#define DMA_DAR(chan)           (chan->mmr_base + 0xc)
#define DMA_NDAR(chan)          (chan->mmr_base + 0x10)
#define DMA_PADR(chan)          (chan->mmr_base + 0x14)
#define DMA_PUADR(chan)         (chan->mmr_base + 0x18)
#define DMA_LADR(chan)          (chan->mmr_base + 0x1c)
#define DMA_BCR(chan)           (chan->mmr_base + 0x20)
#define DMA_DCR(chan)           (chan->mmr_base + 0x24)

/* Application accelerator unit */
#define AAU_ACR(chan)           (chan->mmr_base + 0x0)
#define AAU_ASR(chan)           (chan->mmr_base + 0x4)
#define AAU_ADAR(chan)          (chan->mmr_base + 0x8)
#define AAU_ANDAR(chan)         (chan->mmr_base + 0xc)
#define AAU_SAR(src, chan)      (chan->mmr_base + (0x10 + ((src) << 2)))
#define AAU_DAR(chan)           (chan->mmr_base + 0x20)
#define AAU_ABCR(chan)          (chan->mmr_base + 0x24)
#define AAU_ADCR(chan)          (chan->mmr_base + 0x28)
/* note: relies on a 'chan' variable being in scope at the expansion site */
#define AAU_SAR_EDCR(src_edc)   (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
#define AAU_EDCR0_IDX   8
#define AAU_EDCR1_IDX   17
#define AAU_EDCR2_IDX   26

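/* Channel ids: two memory-copy DMA channels plus the application accelerator */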
#define DMA0_ID 0
#define DMA1_ID 1
#define AAU_ID 2

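/* AAU descriptor control word, as laid out in the hardware descriptor */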
struct iop3xx_aau_desc_ctrl {
        unsigned int int_en:1;
        unsigned int blk1_cmd_ctrl:3;
        unsigned int blk2_cmd_ctrl:3;
        unsigned int blk3_cmd_ctrl:3;
        unsigned int blk4_cmd_ctrl:3;
        unsigned int blk5_cmd_ctrl:3;
        unsigned int blk6_cmd_ctrl:3;
        unsigned int blk7_cmd_ctrl:3;
        unsigned int blk8_cmd_ctrl:3;
        unsigned int blk_ctrl:2;
        unsigned int dual_xor_en:1;
        unsigned int tx_complete:1;
        unsigned int zero_result_err:1;
        unsigned int zero_result_en:1;
        unsigned int dest_write_en:1;
};

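/*
 * AAU extended descriptor control (EDCR) word: per-block commands for
 * source blocks beyond the first eight.
 */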
struct iop3xx_aau_e_desc_ctrl {
        unsigned int reserved:1;
        unsigned int blk1_cmd_ctrl:3;
        unsigned int blk2_cmd_ctrl:3;
        unsigned int blk3_cmd_ctrl:3;
        unsigned int blk4_cmd_ctrl:3;
        unsigned int blk5_cmd_ctrl:3;
        unsigned int blk6_cmd_ctrl:3;
        unsigned int blk7_cmd_ctrl:3;
        unsigned int blk8_cmd_ctrl:3;
        unsigned int reserved2:7;
};

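/* Descriptor control word for the memory-copy DMA channels */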
struct iop3xx_dma_desc_ctrl {
        unsigned int pci_transaction:4;
        unsigned int int_en:1;
        unsigned int dac_cycle_en:1;
        unsigned int mem_to_mem_en:1;
        unsigned int crc_data_tx_en:1;
        unsigned int crc_gen_en:1;
        unsigned int crc_seed_dis:1;
        unsigned int reserved:21;
        unsigned int crc_tx_complete:1;
};

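/* Hardware descriptor for the memory-copy DMA channels */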
struct iop3xx_desc_dma {
        u32 next_desc;
        union {
                u32 pci_src_addr;
                u32 pci_dest_addr;
                u32 src_addr;
        };
        union {
                u32 upper_pci_src_addr;
                u32 upper_pci_dest_addr;
        };
        union {
                u32 local_pci_src_addr;
                u32 local_pci_dest_addr;
                u32 dest_addr;
        };
        u32 byte_count;
        union {
                u32 desc_ctrl;
                struct iop3xx_dma_desc_ctrl desc_ctrl_field;
        };
        u32 crc_addr;
};

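/* Hardware descriptor for the AAU (XOR / block fill engine) */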
struct iop3xx_desc_aau {
        u32 next_desc;
        u32 src[4];
        u32 dest_addr;
        u32 byte_count;
        union {
                u32 desc_ctrl;
                struct iop3xx_aau_desc_ctrl desc_ctrl_field;
        };
        union {
                u32 src_addr;
                u32 e_desc_ctrl;
                struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
        } src_edc[31];
};

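/*
 * Per-source multiplier bytes used by the P+Q descriptor layouts below
 * (p+q itself is unsupported here; see the stubs further down).
 */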
struct iop3xx_aau_gfmr {
        unsigned int gfmr1:8;
        unsigned int gfmr2:8;
        unsigned int gfmr3:8;
        unsigned int gfmr4:8;
};

struct iop3xx_desc_pq_xor {
        u32 next_desc;
        u32 src[3];
        union {
                u32 data_mult1;
                struct iop3xx_aau_gfmr data_mult1_field;
        };
        u32 dest_addr;
        u32 byte_count;
        union {
                u32 desc_ctrl;
                struct iop3xx_aau_desc_ctrl desc_ctrl_field;
        };
        union {
                u32 src_addr;
                u32 e_desc_ctrl;
                struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
                u32 data_multiplier;
                struct iop3xx_aau_gfmr data_mult_field;
                u32 reserved;
        } src_edc_gfmr[19];
};

struct iop3xx_desc_dual_xor {
        u32 next_desc;
        u32 src0_addr;
        u32 src1_addr;
        u32 h_src_addr;
        u32 d_src_addr;
        u32 h_dest_addr;
        u32 byte_count;
        union {
                u32 desc_ctrl;
                struct iop3xx_aau_desc_ctrl desc_ctrl_field;
        };
        u32 d_dest_addr;
};

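/*
 * A slot's hw_desc is viewed through whichever layout the owning channel
 * uses; the helpers below pick the right member based on chan->device->id.
 */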
union iop3xx_desc {
        struct iop3xx_desc_aau *aau;
        struct iop3xx_desc_dma *dma;
        struct iop3xx_desc_pq_xor *pq_xor;
        struct iop3xx_desc_dual_xor *dual_xor;
        void *ptr;
};

/* No support for p+q operations */
static inline int
iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
        BUG();
        return 0;
}

static inline void
iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
                  unsigned long flags)
{
        BUG();
}

static inline void
iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
{
        BUG();
}

static inline void
iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
                         dma_addr_t addr, unsigned char coef)
{
        BUG();
}

static inline int
iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
        BUG();
        return 0;
}

static inline void
iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
                          unsigned long flags)
{
        BUG();
}

static inline void
iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
        BUG();
}

#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr

static inline void
iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
                              dma_addr_t *src)
{
        BUG();
}

static inline int iop_adma_get_max_xor(void)
{
        return 32;
}

static inline int iop_adma_get_max_pq(void)
{
        BUG();
        return 0;
}

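/* Read the address of the descriptor the channel is currently processing */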
static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
{
        int id = chan->device->id;

        switch (id) {
        case DMA0_ID:
        case DMA1_ID:
                return __raw_readl(DMA_DAR(chan));
        case AAU_ID:
                return __raw_readl(AAU_ADAR(chan));
        default:
                BUG();
        }
        return 0;
}

static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
                                                u32 next_desc_addr)
{
        int id = chan->device->id;

        switch (id) {
        case DMA0_ID:
        case DMA1_ID:
                __raw_writel(next_desc_addr, DMA_NDAR(chan));
                break;
        case AAU_ID:
                __raw_writel(next_desc_addr, AAU_ANDAR(chan));
                break;
        }
}

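/* Channel-busy status bit and per-descriptor transfer-size limits */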
#define IOP_ADMA_STATUS_BUSY (1 << 10)
#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)

static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
{
        u32 status = __raw_readl(DMA_CSR(chan));
        return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
}

static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
                                        int num_slots)
{
        /* num_slots will only ever be 1, 2, 4, or 8 */
        return (desc->idx & (num_slots - 1)) ? 0 : 1;
}

/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
{
        *slots_per_op = 1;
        return 1;
}

/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
{
        *slots_per_op = 1;
        return 1;
}

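/*
 * An AAU descriptor slot covers four source addresses; operations with
 * more sources consume 1, 2, 4 or 8 contiguous slots per the table below.
 */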
static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
                                        int *slots_per_op)
{
        static const char slot_count_table[] = {
                                                1, 1, 1, 1, /* 01 - 04 */
                                                2, 2, 2, 2, /* 05 - 08 */
                                                4, 4, 4, 4, /* 09 - 12 */
                                                4, 4, 4, 4, /* 13 - 16 */
                                                8, 8, 8, 8, /* 17 - 20 */
                                                8, 8, 8, 8, /* 21 - 24 */
                                                8, 8, 8, 8, /* 25 - 28 */
                                                8, 8, 8, 8, /* 29 - 32 */
                                              };
        *slots_per_op = slot_count_table[src_cnt - 1];
        return *slots_per_op;
}

static inline int
iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
{
        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return iop_chan_memcpy_slot_count(0, slots_per_op);
        case AAU_ID:
                return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
        default:
                BUG();
        }
        return 0;
}

static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
                                                int *slots_per_op)
{
        int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);

        if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
                return slot_cnt;

        len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
        while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
                len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
                slot_cnt += *slots_per_op;
        }

        slot_cnt += *slots_per_op;

        return slot_cnt;
}

/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
 * descriptors
 */
static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
                                                int *slots_per_op)
{
        int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);

        if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
                return slot_cnt;

        len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
        while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
                len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
                slot_cnt += *slots_per_op;
        }

        slot_cnt += *slots_per_op;

        return slot_cnt;
}

static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
                                        struct iop_adma_chan *chan)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return hw_desc.dma->byte_count;
        case AAU_ID:
                return hw_desc.aau->byte_count;
        default:
                BUG();
        }
        return 0;
}

/* translate the src_idx to a descriptor word index */
static inline int __desc_idx(int src_idx)
{
        static const int desc_idx_table[] = { 0, 0, 0, 0,
                                              0, 1, 2, 3,
                                              5, 6, 7, 8,
                                              9, 10, 11, 12,
                                              14, 15, 16, 17,
                                              18, 19, 20, 21,
                                              23, 24, 25, 26,
                                              27, 28, 29, 30,
                                            };

        return desc_idx_table[src_idx];
}

static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
                                        struct iop_adma_chan *chan,
                                        int src_idx)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return hw_desc.dma->src_addr;
        case AAU_ID:
                break;
        default:
                BUG();
        }

        if (src_idx < 4)
                return hw_desc.aau->src[src_idx];
        else
                return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
}

static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
                                        int src_idx, dma_addr_t addr)
{
        if (src_idx < 4)
                hw_desc->src[src_idx] = addr;
        else
                hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
}

static inline void
iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
{
        struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
        union {
                u32 value;
                struct iop3xx_dma_desc_ctrl field;
        } u_desc_ctrl;

        u_desc_ctrl.value = 0;
        u_desc_ctrl.field.mem_to_mem_en = 1;
        u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
        u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
        hw_desc->desc_ctrl = u_desc_ctrl.value;
        hw_desc->upper_pci_src_addr = 0;
        hw_desc->crc_addr = 0;
}

static inline void
iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
        union {
                u32 value;
                struct iop3xx_aau_desc_ctrl field;
        } u_desc_ctrl;

        u_desc_ctrl.value = 0;
        u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
        u_desc_ctrl.field.dest_write_en = 1;
        u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
        hw_desc->desc_ctrl = u_desc_ctrl.value;
}

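/*
 * Build the AAU control word for an XOR of src_cnt sources: each source
 * block gets a 3-bit command field, blocks beyond the first eight are
 * programmed through the extended control words (EDCR0-2), and blk_ctrl
 * records which of those extended words are in use.
 */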
static inline u32
iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
                     unsigned long flags)
{
        int i, shift;
        u32 edcr;
        union {
                u32 value;
                struct iop3xx_aau_desc_ctrl field;
        } u_desc_ctrl;

        u_desc_ctrl.value = 0;
        switch (src_cnt) {
        case 25 ... 32:
                u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
                edcr = 0;
                shift = 1;
                for (i = 24; i < src_cnt; i++) {
                        edcr |= (1 << shift);
                        shift += 3;
                }
                hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
                src_cnt = 24;
                /* fall through */
        case 17 ... 24:
                if (!u_desc_ctrl.field.blk_ctrl) {
                        hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
                        u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
                }
                edcr = 0;
                shift = 1;
                for (i = 16; i < src_cnt; i++) {
                        edcr |= (1 << shift);
                        shift += 3;
                }
                hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
                src_cnt = 16;
                /* fall through */
        case 9 ... 16:
                if (!u_desc_ctrl.field.blk_ctrl)
                        u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
                edcr = 0;
                shift = 1;
                for (i = 8; i < src_cnt; i++) {
                        edcr |= (1 << shift);
                        shift += 3;
                }
                hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
                src_cnt = 8;
                /* fall through */
        case 2 ... 8:
                shift = 1;
                for (i = 0; i < src_cnt; i++) {
                        u_desc_ctrl.value |= (1 << shift);
                        shift += 3;
                }

                if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
                        u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
        }

        u_desc_ctrl.field.dest_write_en = 1;
        u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
        u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
        hw_desc->desc_ctrl = u_desc_ctrl.value;

        return u_desc_ctrl.value;
}

static inline void
iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
                  unsigned long flags)
{
        iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
}

/* return the number of operations */
static inline int
iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
                       unsigned long flags)
{
        int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
        struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
        union {
                u32 value;
                struct iop3xx_aau_desc_ctrl field;
        } u_desc_ctrl;
        int i, j;

        hw_desc = desc->hw_desc;

        for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
                i += slots_per_op, j++) {
                iter = iop_hw_desc_slot_idx(hw_desc, i);
                u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
                u_desc_ctrl.field.dest_write_en = 0;
                u_desc_ctrl.field.zero_result_en = 1;
                u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
                iter->desc_ctrl = u_desc_ctrl.value;

                /* for the subsequent descriptors preserve the store queue
                 * and chain them together
                 */
                if (i) {
                        prev_hw_desc =
                                iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
                        prev_hw_desc->next_desc =
                                (u32) (desc->async_tx.phys + (i << 5));
                }
        }

        return j;
}

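/*
 * Build an AAU control word with the destination write disabled; used by
 * iop_desc_init_interrupt() for descriptors that only signal completion.
 */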
static inline void
iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
                       unsigned long flags)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
        union {
                u32 value;
                struct iop3xx_aau_desc_ctrl field;
        } u_desc_ctrl;

        u_desc_ctrl.value = 0;
        switch (src_cnt) {
        case 25 ... 32:
                u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
                hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
                /* fall through */
        case 17 ... 24:
                if (!u_desc_ctrl.field.blk_ctrl) {
                        hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
                        u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
                }
                hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
                /* fall through */
        case 9 ... 16:
                if (!u_desc_ctrl.field.blk_ctrl)
                        u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
                hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
                /* fall through */
        case 1 ... 8:
                if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
                        u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
        }

        u_desc_ctrl.field.dest_write_en = 0;
        u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
        hw_desc->desc_ctrl = u_desc_ctrl.value;
}

static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
                                        struct iop_adma_chan *chan,
                                        u32 byte_count)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                hw_desc.dma->byte_count = byte_count;
                break;
        case AAU_ID:
                hw_desc.aau->byte_count = byte_count;
                break;
        default:
                BUG();
        }
}

static inline void
iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
                        struct iop_adma_chan *chan)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                iop_desc_init_memcpy(desc, 1);
                hw_desc.dma->byte_count = 0;
                hw_desc.dma->dest_addr = 0;
                hw_desc.dma->src_addr = 0;
                break;
        case AAU_ID:
                iop_desc_init_null_xor(desc, 2, 1);
                hw_desc.aau->byte_count = 0;
                hw_desc.aau->dest_addr = 0;
                hw_desc.aau->src[0] = 0;
                hw_desc.aau->src[1] = 0;
                break;
        default:
                BUG();
        }
}

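/*
 * Zero-sum operations are limited to 1KB per descriptor, so a larger
 * length is spread across every descriptor in the chain.
 */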
static inline void
iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
        int slots_per_op = desc->slots_per_op;
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
        int i = 0;

        if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
                hw_desc->byte_count = len;
        } else {
                do {
                        iter = iop_hw_desc_slot_idx(hw_desc, i);
                        iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
                        len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
                        i += slots_per_op;
                } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);

                iter = iop_hw_desc_slot_idx(hw_desc, i);
                iter->byte_count = len;
        }
}

static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
                                        struct iop_adma_chan *chan,
                                        dma_addr_t addr)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                hw_desc.dma->dest_addr = addr;
                break;
        case AAU_ID:
                hw_desc.aau->dest_addr = addr;
                break;
        default:
                BUG();
        }
}

static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
                                        dma_addr_t addr)
{
        struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
        hw_desc->src_addr = addr;
}

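/*
 * For multi-descriptor zero-sum / XOR chains, program the same source
 * index into every descriptor, advancing the address by the engine's
 * per-descriptor byte-count limit.
 */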
static inline void
iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
                                dma_addr_t addr)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
        int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
        int i;

        for (i = 0; (slot_cnt -= slots_per_op) >= 0;
                i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
                iter = iop_hw_desc_slot_idx(hw_desc, i);
                iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
        }
}

static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
                                        int src_idx, dma_addr_t addr)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
        int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
        int i;

        for (i = 0; (slot_cnt -= slots_per_op) >= 0;
                i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
                iter = iop_hw_desc_slot_idx(hw_desc, i);
                iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
        }
}

static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
                                        u32 next_desc_addr)
{
        /* hw_desc->next_desc is the same location for all channels */
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        iop_paranoia(hw_desc.dma->next_desc);
        hw_desc.dma->next_desc = next_desc_addr;
}

static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
{
        /* hw_desc->next_desc is the same location for all channels */
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
        return hw_desc.dma->next_desc;
}

static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
{
        /* hw_desc->next_desc is the same location for all channels */
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
        hw_desc.dma->next_desc = 0;
}

static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
                                                u32 val)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
        hw_desc->src[0] = val;
}

static inline enum sum_check_flags
iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
        struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;

        iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
        return desc_ctrl.zero_result_err << SUM_CHECK_P;
}

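/* kick the channel (CCR bit 1) so it picks up newly appended descriptors */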
static inline void iop_chan_append(struct iop_adma_chan *chan)
{
        u32 dma_chan_ctrl;

        dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
        dma_chan_ctrl |= 0x2;
        __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
{
        return __raw_readl(DMA_CSR(chan));
}

static inline void iop_chan_disable(struct iop_adma_chan *chan)
{
        u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
        dma_chan_ctrl &= ~1;
        __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline void iop_chan_enable(struct iop_adma_chan *chan)
{
        u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));

        dma_chan_ctrl |= 1;
        __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

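/* status bits are acknowledged by writing them back to the CSR */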
static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
{
        u32 status = __raw_readl(DMA_CSR(chan));
        status &= (1 << 9);
        __raw_writel(status, DMA_CSR(chan));
}

static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
{
        u32 status = __raw_readl(DMA_CSR(chan));
        status &= (1 << 8);
        __raw_writel(status, DMA_CSR(chan));
}

static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
{
        u32 status = __raw_readl(DMA_CSR(chan));

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
                break;
        case AAU_ID:
                status &= (1 << 5);
                break;
        default:
                BUG();
        }

        __raw_writel(status, DMA_CSR(chan));
}

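/*
 * Decode individual error bits from a previously read channel status
 * word; only the DMA channels report the PCI abort and split
 * transaction errors below.
 */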
static inline int
iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
{
        return 0;
}

static inline int
iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
{
        return 0;
}

static inline int
iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
{
        return 0;
}

static inline int
iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
{
        return test_bit(5, &status);
}

static inline int
iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
{
        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return test_bit(2, &status);
        default:
                return 0;
        }
}

static inline int
iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
{
        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return test_bit(3, &status);
        default:
                return 0;
        }
}

static inline int
iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
{
        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return test_bit(1, &status);
        default:
                return 0;
        }
}
#endif /* _ADMA_H */